Commit a7b78bef authored by Stephen Boyd

Merge branch 'clk-rate-range' into clk-next

 - Various clk rate range fixes
 - Drop clk rate range constraints on clk_put() (redux)

* clk-rate-range: (28 commits)
  clk: mediatek: clk-mux: Add .determine_rate() callback
  clk: tests: Add tests for notifiers
  clk: Update req_rate on __clk_recalc_rates()
  clk: tests: Add missing test case for ranges
  clk: qcom: clk-rcg2: Take clock boundaries into consideration for gfx3d
  clk: Introduce the clk_hw_get_rate_range function
  clk: Zero the clk_rate_request structure
  clk: Stop forwarding clk_rate_requests to the parent
  clk: Constify clk_has_parent()
  clk: Introduce clk_core_has_parent()
  clk: Switch from __clk_determine_rate to clk_core_round_rate_nolock
  clk: Add our request boundaries in clk_core_init_rate_req
  clk: Introduce clk_hw_init_rate_request()
  clk: Move clk_core_init_rate_req() from clk_core_round_rate_nolock() to its caller
  clk: Change clk_core_init_rate_req prototype
  clk: Set req_rate on reparenting
  clk: Take into account uncached clocks in clk_set_rate_range()
  clk: tests: Add some tests for orphan with multiple parents
  clk: tests: Add tests for mux with multiple parents
  clk: tests: Add tests for single parent mux
  ...
parents c461c677 b05ea331
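
For orientation before the diff: the central change in this branch is that clock providers no longer copy the incoming clk_rate_request by value when they need to consult a parent clock; they forward it with the new clk_hw_forward_rate_request() helper, which re-initializes the request for the parent and keeps the stricter of the two rate ranges. The sketch below is an editorial illustration of that pattern for a hypothetical fixed divide-by-two clock (foo_determine_rate() is not part of this merge), using only helpers that appear in the diff that follows.

static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	/* Ask the parent for twice the requested rate, within the
	 * intersection of our boundaries and the parent's. */
	clk_hw_forward_rate_request(hw, req, parent, &parent_req, req->rate * 2);

	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	req->best_parent_hw = parent;
	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate / 2;

	return 0;
}
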
@@ -136,7 +136,6 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
{
struct clk_generated *gck = to_clk_generated(hw);
struct clk_hw *parent = NULL;
-struct clk_rate_request req_parent = *req;
long best_rate = -EINVAL;
unsigned long min_rate, parent_rate;
int best_diff = -1;
@@ -192,7 +191,9 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
goto end;
for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
-req_parent.rate = req->rate * div;
+struct clk_rate_request req_parent;
+
+clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate * div);
if (__clk_determine_rate(parent, &req_parent))
continue;
clk_generated_best_diff(req, parent, req_parent.rate, div,
...
@@ -581,7 +581,6 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
-struct clk_rate_request req_parent = *req;
struct clk_hw *parent;
long best_rate = LONG_MIN, best_diff = LONG_MIN;
unsigned long parent_rate;
@@ -618,11 +617,15 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
goto end;
for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+struct clk_rate_request req_parent;
+unsigned long req_rate;
+
if (div == MASTER_PRES_MAX)
-req_parent.rate = req->rate * 3;
+req_rate = req->rate * 3;
else
-req_parent.rate = req->rate << div;
+req_rate = req->rate << div;
+
+clk_hw_forward_rate_request(hw, req, parent, &req_parent, req_rate);
if (__clk_determine_rate(parent, &req_parent))
continue;
...
@@ -269,7 +269,6 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
-struct clk_rate_request req_parent = *req;
unsigned long parent_rate = clk_hw_get_rate(parent);
unsigned long tmp_rate;
long best_rate = LONG_MIN;
@@ -302,8 +301,9 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
goto end;
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
-req_parent.rate = req->rate << shift;
+struct clk_rate_request req_parent;
+
+clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate << shift);
if (__clk_determine_rate(parent, &req_parent))
continue;
...
@@ -85,10 +85,11 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
-struct clk_rate_request tmp_req = *req;
+struct clk_rate_request tmp_req;
+
parent = clk_hw_get_parent(mux_hw);
+
+clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
@@ -104,12 +105,13 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
-struct clk_rate_request tmp_req = *req;
+struct clk_rate_request tmp_req;
+
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
+
+clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
...
@@ -386,13 +386,13 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table,
u8 width, unsigned long flags)
{
-struct clk_rate_request req = {
-.rate = rate,
-.best_parent_rate = *prate,
-.best_parent_hw = parent,
-};
+struct clk_rate_request req;
int ret;
+clk_hw_init_rate_request(hw, &req, rate);
+req.best_parent_rate = *prate;
+req.best_parent_hw = parent;
+
ret = divider_determine_rate(hw, &req, table, width, flags);
if (ret)
return ret;
@@ -408,13 +408,13 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
-struct clk_rate_request req = {
-.rate = rate,
-.best_parent_rate = *prate,
-.best_parent_hw = parent,
-};
+struct clk_rate_request req;
int ret;
+clk_hw_init_rate_request(hw, &req, rate);
+req.best_parent_rate = *prate;
+req.best_parent_hw = parent;
+
ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
if (ret)
return ret;
...
@@ -536,6 +536,53 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
+static void clk_core_init_rate_req(struct clk_core * const core,
+struct clk_rate_request *req,
+unsigned long rate);
+
+static int clk_core_round_rate_nolock(struct clk_core *core,
+struct clk_rate_request *req);
+
+static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
+{
+struct clk_core *tmp;
+unsigned int i;
+
+/* Optimize for the case where the parent is already the parent. */
+if (core->parent == parent)
+return true;
+
+for (i = 0; i < core->num_parents; i++) {
+tmp = clk_core_get_parent_by_index(core, i);
+if (!tmp)
+continue;
+
+if (tmp == parent)
+return true;
+}
+
+return false;
+}
+
+static void
+clk_core_forward_rate_req(struct clk_core *core,
+const struct clk_rate_request *old_req,
+struct clk_core *parent,
+struct clk_rate_request *req,
+unsigned long parent_rate)
+{
+if (WARN_ON(!clk_core_has_parent(core, parent)))
+return;
+
+clk_core_init_rate_req(parent, req, parent_rate);
+
+if (req->min_rate < old_req->min_rate)
+req->min_rate = old_req->min_rate;
+
+if (req->max_rate > old_req->max_rate)
+req->max_rate = old_req->max_rate;
+}
+
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
@@ -543,14 +590,20 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
-struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
if (core->flags & CLK_SET_RATE_NO_REPARENT) {
parent = core->parent;
if (core->flags & CLK_SET_RATE_PARENT) {
-ret = __clk_determine_rate(parent ? parent->hw : NULL,
-&parent_req);
+struct clk_rate_request parent_req;
+
+if (!parent) {
+req->rate = 0;
+return 0;
+}
+
+clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
@@ -567,23 +620,29 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
+unsigned long parent_rate;
+
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
-parent_req = *req;
-ret = __clk_determine_rate(parent->hw, &parent_req);
+struct clk_rate_request parent_req;
+
+clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+
+parent_rate = parent_req.rate;
} else {
-parent_req.rate = clk_core_get_rate_nolock(parent);
+parent_rate = clk_core_get_rate_nolock(parent);
}
-if (mux_is_better_rate(req->rate, parent_req.rate,
+if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
-best = parent_req.rate;
+best = parent_rate;
}
}
@@ -625,6 +684,22 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+/*
+ * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
+ * @hw: the hw clk we want to get the range from
+ * @min_rate: pointer to the variable that will hold the minimum
+ * @max_rate: pointer to the variable that will hold the maximum
+ *
+ * Fills the @min_rate and @max_rate variables with the minimum and
+ * maximum that clock can reach.
+ */
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+unsigned long *max_rate)
+{
+clk_core_get_boundaries(hw->core, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
+
static bool clk_core_check_boundaries(struct clk_core *core,
unsigned long min_rate,
unsigned long max_rate)
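
The helper added above is exported so that clock providers can honour the rate range set on one of their clocks; the "clk: qcom: clk-rcg2: Take clock boundaries into consideration for gfx3d" patch in the shortlog is that kind of user. A minimal, hypothetical sketch of such a use (bar_determine_rate() is an editorial example, not code from this merge):

static int bar_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	unsigned long min_rate, max_rate;

	/* Boundaries aggregated from the clock, its consumers and its hardware limits. */
	clk_hw_get_rate_range(hw, &min_rate, &max_rate);

	if (req->rate < min_rate)
		req->rate = min_rate;
	else if (req->rate > max_rate)
		req->rate = max_rate;

	return 0;
}
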
@@ -1341,7 +1416,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
if (!core)
return 0;
-req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+/*
+ * Some clock providers hand-craft their clk_rate_requests and
+ * might not fill min_rate and max_rate.
+ *
+ * If it's the case, clamping the rate is equivalent to setting
+ * the rate to 0 which is bad. Skip the clamping but complain so
+ * that it gets fixed, hopefully.
+ */
+if (!req->min_rate && !req->max_rate)
+pr_warn("%s: %s: clk_rate_request has initialized min or max rate.\n",
+__func__, core->name);
+else
+req->rate = clamp(req->rate, req->min_rate, req->max_rate);
/*
* At this point, core protection will be disabled
@@ -1368,13 +1455,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
}
static void clk_core_init_rate_req(struct clk_core * const core,
-struct clk_rate_request *req)
+struct clk_rate_request *req,
+unsigned long rate)
{
struct clk_core *parent;
if (WARN_ON(!core || !req))
return;
+memset(req, 0, sizeof(*req));
+req->rate = rate;
+clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@@ -1385,6 +1478,51 @@ static void clk_core_init_rate_req(struct clk_core * const core,
}
}
+/**
+ * clk_hw_init_rate_request - Initializes a clk_rate_request
+ * @hw: the clk for which we want to submit a rate request
+ * @req: the clk_rate_request structure we want to initialise
+ * @rate: the rate which is to be requested
+ *
+ * Initializes a clk_rate_request structure to submit to
+ * __clk_determine_rate() or similar functions.
+ */
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+struct clk_rate_request *req,
+unsigned long rate)
+{
+if (WARN_ON(!hw || !req))
+return;
+
+clk_core_init_rate_req(hw->core, req, rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
+
+/**
+ * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
+ * @hw: the original clock that got the rate request
+ * @old_req: the original clk_rate_request structure we want to forward
+ * @parent: the clk we want to forward @old_req to
+ * @req: the clk_rate_request structure we want to initialise
+ * @parent_rate: The rate which is to be requested to @parent
+ *
+ * Initializes a clk_rate_request structure to submit to a clock parent
+ * in __clk_determine_rate() or similar functions.
+ */
+void clk_hw_forward_rate_request(const struct clk_hw *hw,
+const struct clk_rate_request *old_req,
+const struct clk_hw *parent,
+struct clk_rate_request *req,
+unsigned long parent_rate)
+{
+if (WARN_ON(!hw || !old_req || !parent || !req))
+return;
+
+clk_core_forward_rate_req(hw->core, old_req,
+parent->core, req,
+parent_rate);
+}
+
static bool clk_core_can_round(struct clk_core * const core)
{
return core->ops->determine_rate || core->ops->round_rate;
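
Taken together, clk_hw_init_rate_request() and clk_hw_forward_rate_request() replace the clk_rate_request structures that providers used to build (and sometimes only partially fill) by hand. As a rough sketch of how a multi-parent mux driver might lean on the forwarding helper to pick a parent, comparable in spirit to the "clk: mediatek: clk-mux: Add .determine_rate() callback" patch in the shortlog (mux_example_determine_rate() is an editorial example, not part of the commit; the framework-side equivalent is clk_mux_determine_rate_flags() above):

static int mux_example_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw *best_parent = NULL;
	unsigned long best = 0;
	unsigned int i;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
		struct clk_rate_request parent_req;

		if (!parent)
			continue;

		/* Initialize the request for this parent, with our min/max folded in. */
		clk_hw_forward_rate_request(hw, req, parent, &parent_req, req->rate);
		if (__clk_determine_rate(parent, &parent_req))
			continue;

		/* Keep the fastest candidate not above the requested rate. */
		if (parent_req.rate <= req->rate && parent_req.rate > best) {
			best = parent_req.rate;
			best_parent = parent;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
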
@@ -1393,6 +1531,8 @@ static bool clk_core_can_round(struct clk_core * const core)
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
+int ret;
+
lockdep_assert_held(&prepare_lock);
if (!core) {
@@ -1400,12 +1540,22 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
return 0;
}
-clk_core_init_rate_req(core, req);
-
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
-else if (core->flags & CLK_SET_RATE_PARENT)
-return clk_core_round_rate_nolock(core->parent, req);
+
+if (core->flags & CLK_SET_RATE_PARENT) {
+struct clk_rate_request parent_req;
+
+clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+ret = clk_core_round_rate_nolock(core->parent, &parent_req);
+if (ret)
+return ret;
+
+req->best_parent_rate = parent_req.rate;
+req->rate = parent_req.rate;
+
+return 0;
+}
req->rate = core->rate;
return 0;
@@ -1449,8 +1599,7 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
int ret;
struct clk_rate_request req;
-clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
-req.rate = rate;
+clk_core_init_rate_req(hw->core, &req, rate);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
@@ -1482,8 +1631,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
-clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
-req.rate = rate;
+clk_core_init_rate_req(clk->core, &req, rate);
ret = clk_core_round_rate_nolock(clk->core, &req);
@@ -1612,6 +1760,7 @@ static unsigned long clk_recalc(struct clk_core *core,
/**
* __clk_recalc_rates
* @core: first clk in the subtree
+ * @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1621,7 +1770,8 @@ static unsigned long clk_recalc(struct clk_core *core,
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, bool update_req,
+unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
@@ -1635,6 +1785,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
+if (update_req)
+core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
@@ -1644,13 +1796,13 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
-__clk_recalc_rates(child, msg);
+__clk_recalc_rates(child, update_req, msg);
}
static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
-__clk_recalc_rates(core, 0);
+__clk_recalc_rates(core, false, 0);
return clk_core_get_rate_nolock(core);
}
@@ -1660,8 +1812,9 @@ static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
-* is set, which means a recalc_rate will be issued.
-* If clk is NULL then returns 0.
+* is set, which means a recalc_rate will be issued. Can be called regardless of
+* the clock enabledness. If clk is NULL, or if an error occurred, then returns
+* 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -1865,6 +2018,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+
__clk_set_parent_after(core, old_parent, parent);
return ret;
@@ -1970,11 +2124,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
if (clk_core_can_round(core)) {
struct clk_rate_request req;
-req.rate = rate;
-req.min_rate = min_rate;
-req.max_rate = max_rate;
-
-clk_core_init_rate_req(core, &req);
+clk_core_init_rate_req(core, &req, rate);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
@@ -2173,8 +2323,7 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
if (cnt < 0)
return cnt;
-clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
-req.rate = req_rate;
+clk_core_init_rate_req(core, &req, req_rate);
ret = clk_core_round_rate_nolock(core, &req);
@@ -2325,19 +2474,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+unsigned long min,
+unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
+lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
@@ -2350,8 +2495,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
return -EINVAL;
}
-clk_prepare_lock();
-
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
@@ -2366,6 +2509,10 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
goto out;
}
+rate = clk->core->req_rate;
+if (clk->core->flags & CLK_GET_RATE_NOCACHE)
+rate = clk_core_get_rate_recalc(clk->core);
+
/*
* Since the boundaries have been changed, let's give the
* opportunity to the provider to adjust the clock rate based on
@@ -2383,7 +2530,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
* - the determine_rate() callback does not really check for
* this corner case when determining the rate
*/
-rate = clamp(clk->core->req_rate, min, max);
+rate = clamp(rate, min, max);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* rollback the changes */
@@ -2395,6 +2542,28 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
+return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+int ret;
+
+if (!clk)
+return 0;
+
+clk_prepare_lock();
+
+ret = clk_set_rate_range_nolock(clk, min, max);
+
clk_prepare_unlock();
return ret;
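
The split into clk_set_rate_range_nolock() lets the framework drop a consumer's boundaries while already holding the prepare lock, which is exactly what the reworked __clk_put() hunk further down does. From a consumer's point of view the API is unchanged; a minimal, hypothetical usage sketch follows (example_limit_clock() and the rates are illustrative only; clk_drop_range() is the existing consumer helper the new kunit tests below also exercise):

static int example_limit_clock(struct clk *clk)
{
	int ret;

	/* Constrain the clock to [1 MHz, 2 MHz]; the core re-evaluates
	 * the current rate against the new boundaries. */
	ret = clk_set_rate_range(clk, 1000000, 2000000);
	if (ret)
		return ret;

	/* ... use the clock ... */

	/* Drop the constraint again, back to a 0..ULONG_MAX range. */
	return clk_drop_range(clk);
}
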
@@ -2474,7 +2643,7 @@ static void clk_core_reparent(struct clk_core *core,
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
-__clk_recalc_rates(core, POST_RATE_CHANGE);
+__clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
@@ -2495,27 +2664,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
-struct clk_core *core, *parent_core;
-int i;
-
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
-core = clk->core;
-parent_core = parent->core;
-
-/* Optimize for the case where the parent is already the parent. */
-if (core->parent == parent_core)
-return true;
-
-for (i = 0; i < core->num_parents; i++)
-if (!strcmp(core->parents[i].name, parent_core->name))
-return true;
-
-return false;
+return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2572,9 +2727,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
/* propagate rate an accuracy recalculation accordingly */
if (ret) {
-__clk_recalc_rates(core, ABORT_RATE_CHANGE);
+__clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
-__clk_recalc_rates(core, POST_RATE_CHANGE);
+__clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
@@ -3471,7 +3626,7 @@ static void clk_core_reparent_orphans_nolock(void)
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
-__clk_recalc_rates(orphan, 0);
+__clk_recalc_rates(orphan, true, 0);
/*
* __clk_init_parent() will set the initial req_rate to
@@ -4347,9 +4502,10 @@ void __clk_put(struct clk *clk)
}
hlist_del(&clk->clks_node);
-if (clk->min_rate > clk->core->req_rate ||
-clk->max_rate < clk->core->req_rate)
-clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+/* If we had any boundaries on that clock, let's drop them. */
+if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
owner = clk->core->owner;
kref_put(&clk->core->ref, __clk_release);
...
@@ -108,6 +108,39 @@ static const struct clk_ops clk_dummy_single_parent_ops = {
.get_parent = clk_dummy_single_get_parent,
};
struct clk_multiple_parent_ctx {
struct clk_dummy_context parents_ctx[2];
struct clk_hw hw;
u8 current_parent;
};
static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_multiple_parent_ctx *ctx =
container_of(hw, struct clk_multiple_parent_ctx, hw);
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
ctx->current_parent = index;
return 0;
}
static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
{
struct clk_multiple_parent_ctx *ctx =
container_of(hw, struct clk_multiple_parent_ctx, hw);
return ctx->current_parent;
}
static const struct clk_ops clk_multiple_parents_mux_ops = {
.get_parent = clk_multiple_parents_mux_get_parent,
.set_parent = clk_multiple_parents_mux_set_parent,
.determine_rate = __clk_mux_determine_rate_closest,
};
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
struct clk_dummy_context *ctx;
@@ -160,12 +193,14 @@ static void clk_test_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
-struct clk *clk = hw->clk;
+struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, ctx->rate);
+
+clk_put(clk);
}
/*
@@ -179,7 +214,7 @@ static void clk_test_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
-struct clk *clk = hw->clk;
+struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -189,6 +224,8 @@ static void clk_test_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+clk_put(clk);
}
/*
@@ -202,7 +239,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
-struct clk *clk = hw->clk;
+struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -216,6 +253,8 @@ static void clk_test_set_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+clk_put(clk);
}
/*
@@ -226,7 +265,7 @@ static void clk_test_round_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
-struct clk *clk = hw->clk;
+struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rounded_rate, set_rate;
rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
@@ -240,6 +279,8 @@ static void clk_test_round_set_get_rate(struct kunit *test)
set_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, set_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
+
+clk_put(clk);
}
static struct kunit_case clk_test_cases[] = {
@@ -250,6 +291,11 @@ static struct kunit_case clk_test_cases[] = {
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate API with simple scenarios
*/
static struct kunit_suite clk_test_suite = {
.name = "clk-test",
.init = clk_test_init,
@@ -257,16 +303,132 @@ static struct kunit_suite clk_test_suite = {
.test_cases = clk_test_cases,
};
-struct clk_single_parent_ctx {
-struct clk_dummy_context parent_ctx;
-struct clk_hw hw;
+static int clk_uncached_test_init(struct kunit *test)
+{
+struct clk_dummy_context *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->rate = DUMMY_CLOCK_INIT_RATE;
ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
&clk_dummy_rate_ops,
CLK_GET_RATE_NOCACHE);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
/*
* Test that for an uncached clock, the clock framework doesn't cache
* the rate and clk_get_rate() will return the underlying clock rate
* even if it changed.
*/
static void clk_test_uncached_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
/* We change the rate behind the clock framework's back */
ctx->rate = DUMMY_CLOCK_RATE_1;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
* Test that for an uncached clock, clk_set_rate_range() will work
* properly if the rate hasn't changed.
*/
static void clk_test_uncached_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that for an uncached clock, clk_set_rate_range() will work
* properly if the rate has changed in hardware.
*
* In this case, it means that if the rate wasn't initially in the range
* we're trying to set, but got changed at some point into the range
* without the kernel knowing about it, its rate shouldn't be affected.
*/
static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
/* We change the rate behind the clock framework's back */
ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
clk_put(clk);
}
static struct kunit_case clk_uncached_test_cases[] = {
KUNIT_CASE(clk_test_uncached_get_rate),
KUNIT_CASE(clk_test_uncached_set_range),
KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
{}
};
-static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
+/*
* Test suite for a basic, uncached, rate clock, without any parent.
*
* These tests exercise the rate API with simple scenarios
*/
static struct kunit_suite clk_uncached_test_suite = {
.name = "clk-uncached-test",
.init = clk_uncached_test_init,
.exit = clk_test_exit,
.test_cases = clk_uncached_test_cases,
};
static int
clk_multiple_parents_mux_test_init(struct kunit *test)
{
-struct clk_single_parent_ctx *ctx;
-struct clk_init_data init = { };
-const char * const parents[] = { "orphan_parent" };
+struct clk_multiple_parent_ctx *ctx;
+const char *parents[2] = { "parent-0", "parent-1"};
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
@@ -274,73 +436,993 @@ static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test
return -ENOMEM;
test->priv = ctx;
-init.name = "test_orphan_dummy_parent";
-init.ops = &clk_dummy_single_parent_ops;
-init.parent_names = parents;
-init.num_parents = ARRAY_SIZE(parents);
-init.flags = CLK_SET_RATE_PARENT;
-ctx->hw.init = &init;
+ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+&clk_dummy_rate_ops,
+0);
+ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
+if (ret)
+return ret;
ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->current_parent = 0;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
-memset(&init, 0, sizeof(init));
-init.name = "orphan_parent";
-init.ops = &clk_dummy_rate_ops;
-ctx->parent_ctx.hw.init = &init;
-ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
-
-ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+return 0;
+}
+
+static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parents_ctx[0].hw);
clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
/*
* Test that for a clock with multiple parents, clk_get_parent()
* actually returns the current one.
*/
static void
clk_test_multiple_parents_mux_get_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a multiple parents, clk_has_parent()
* actually reports all of them as parents.
*/
static void
clk_test_multiple_parents_mux_has_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a multiple parents, if we set a range on
* that clock and the parent is changed, its rate after the reparenting
* is still within the range we asked for.
*
* FIXME: clk_set_parent() only does the reparenting but doesn't
* reevaluate whether the new clock rate is within its boundaries or
* not.
*/
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent1, *parent2;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 - 1000,
DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_parent(clk, parent2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
clk_put(parent2);
clk_put(parent1);
clk_put(clk);
}
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
{}
};
/*
* Test suite for a basic mux clock with two parents, with
* CLK_SET_RATE_PARENT on the child.
*
* These tests exercise the consumer API and check that the state of the
* child and parents are sane and consistent.
*/
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
.name = "clk-multiple-parents-mux-test",
.init = clk_multiple_parents_mux_test_init,
.exit = clk_multiple_parents_mux_test_exit,
.test_cases = clk_multiple_parents_mux_test_cases,
};
static int
clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx;
const char *parents[2] = { "missing-parent", "proper-parent"};
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
-static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
+static void
+clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
{
-struct clk_single_parent_ctx *ctx = test->priv;
+struct clk_multiple_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
-clk_hw_unregister(&ctx->parent_ctx.hw);
+clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
/*
- * Test that a mux-only clock, with an initial rate within a range,
- * will still have the same rate after the range has been enforced.
+ * Test that, for a mux whose current parent hasn't been registered yet and is
+ * thus orphan, clk_get_parent() will return NULL.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
clk_put(clk);
}
/*
* Test that, for a mux whose current parent hasn't been registered yet,
* calling clk_set_parent() to a valid parent will properly update the
* mux parent and its orphan status.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent, *new_parent;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent = clk_get_parent(clk);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_drop_range() on the mux won't affect the parent
* rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_drop_range(clk);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, the rate of the mux and its new parent are consistent.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_put() on the mux won't affect the parent rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk *clk, *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
clk = clk_hw_get_clk(&ctx->hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
clk_put(clk);
new_parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_set_rate_range() will affect the parent state if
* its rate is out of range.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan but got switched to a valid
* parent, calling clk_set_rate_range() won't affect the parent state if
* its rate is within range.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long parent_rate, new_parent_rate;
int ret;
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, parent_rate, 0);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_INIT_RATE - 1000,
DUMMY_CLOCK_INIT_RATE + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
new_parent_rate = clk_get_rate(parent);
KUNIT_ASSERT_GT(test, new_parent_rate, 0);
KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
clk_put(parent);
clk_put(clk);
}
/*
* Test that, for a mux whose current parent hasn't been registered yet,
* calling clk_set_rate_range() will succeed, and will be taken into
* account when rounding a rate.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
int ret;
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that, for a mux that started orphan, was assigned and rate and
* then got switched to a valid parent, its rate is eventually within
* range.
*
* FIXME: Even though we update the rate as part of clk_set_parent(), we
* don't evaluate whether that new rate is within range and needs to be
* adjusted.
*/
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(parent);
clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
{}
};
/*
* Test suite for a basic mux clock with two parents. The default parent
* isn't registered, only the second parent is. By default, the clock
* will thus be orphan.
*
* These tests exercise the behaviour of the consumer API when dealing
* with an orphan clock, and how we deal with the transition to a valid
* parent.
*/
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
.name = "clk-orphan-transparent-multiple-parent-mux-test",
.init = clk_orphan_transparent_multiple_parent_mux_test_init,
.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
struct clk_single_parent_ctx {
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
static int clk_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ctx->parent_ctx.hw.init =
CLK_HW_INIT_NO_PARENT("parent-clk",
&clk_dummy_rate_ops,
0);
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void
clk_single_parent_mux_test_exit(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parent_ctx.hw);
}
/*
* Test that for a clock with a single parent, clk_get_parent() actually
* returns the parent.
*/
static void
clk_test_single_parent_mux_get_parent(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock with a single parent, clk_has_parent() actually
* reports it as a parent.
*/
static void
clk_test_single_parent_mux_has_parent(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
clk_put(parent);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set disjoint ranges on the parent and then the child,
* the second call will return an error.
*
* FIXME: clk_set_rate_range() only considers the current clock when
* evaluating whether ranges are disjoint, not the upstream clocks'
* ranges.
*/
static void
clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, 1000, 2000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set disjoint ranges on the child and then the parent,
* the second call will return an error.
*
* FIXME: clk_set_rate_range() only considers the current clock when
* evaluating whether ranges are disjoint, not the downstream clocks'
* ranges.
*/
static void
clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(clk, 1000, 2000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(parent, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the parent and then call
* clk_round_rate(), the boundaries of the parent are taken into
* account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the parent and a more restrictive one on
* the child, and then call clk_round_rate(), the boundaries of the
* two clocks are taken into account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
clk_put(clk);
}
/*
* Test that for a clock that can't modify its rate and with a single
* parent, if we set a range on the child and a more restrictive one on
* the parent, and then call clk_round_rate(), the boundaries of the
* two clocks are taken into account.
*/
static void
clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
parent = clk_get_parent(clk);
KUNIT_ASSERT_PTR_NE(test, parent, NULL);
ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
clk_put(clk);
}
static struct kunit_case clk_single_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_single_parent_mux_get_parent),
KUNIT_CASE(clk_test_single_parent_mux_has_parent),
KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
{}
};
/*
* Test suite for a basic mux clock with one parent, with
* CLK_SET_RATE_PARENT on the child.
*
* These tests exercise the consumer API and check that the state of the
* child and parent are sane and consistent.
*/
static struct kunit_suite
clk_single_parent_mux_test_suite = {
.name = "clk-single-parent-mux-test",
.init = clk_single_parent_mux_test_init,
.exit = clk_single_parent_mux_test_exit,
.test_cases = clk_single_parent_mux_test_cases,
};
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
struct clk_init_data init = { };
const char * const parents[] = { "orphan_parent" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
init.name = "test_orphan_dummy_parent";
init.ops = &clk_dummy_single_parent_ops;
init.parent_names = parents;
init.num_parents = ARRAY_SIZE(parents);
init.flags = CLK_SET_RATE_PARENT;
ctx->hw.init = &init;
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
memset(&init, 0, sizeof(init));
init.name = "orphan_parent";
init.ops = &clk_dummy_rate_ops;
ctx->parent_ctx.hw.init = &init;
ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
return 0;
}
/*
* Test that a mux-only clock, with an initial rate within a range,
* will still have the same rate after the range has been enforced.
*
* See:
* https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
*/
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate, new_rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(clk,
ctx->parent_ctx.rate - 1000,
ctx->parent_ctx.rate + 1000),
0);
new_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_rate, 0);
KUNIT_EXPECT_EQ(test, rate, new_rate);
clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
{}
};
/*
* Test suite for a basic mux clock with one parent. The parent is
* registered after its child. The clock will thus be an orphan when
* registered, but will no longer be when the tests run.
*
* These tests make sure a clock that used to be orphan has a sane,
* consistent behaviour.
*/
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
.name = "clk-orphan-transparent-single-parent-test",
.init = clk_orphan_transparent_single_parent_mux_test_init,
.exit = clk_single_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
struct clk_single_parent_two_lvl_ctx {
struct clk_dummy_context parent_parent_ctx;
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
static int
clk_orphan_two_level_root_last_test_init(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx;
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->parent_ctx.hw.init =
CLK_HW_INIT("intermediate-parent",
"root-parent",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
ctx->hw.init =
CLK_HW_INIT("test-clk", "intermediate-parent",
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
ctx->parent_parent_ctx.hw.init =
CLK_HW_INIT_NO_PARENT("root-parent",
&clk_dummy_rate_ops,
0);
ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
if (ret)
return ret;
return 0;
}
static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->parent_ctx.hw);
clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}
/*
* Test that, for a clock whose parent used to be orphan, clk_get_rate()
* will return the proper rate.
*/
static void
clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
clk_put(clk);
}
/*
* Test that, for a clock whose parent used to be orphan,
* clk_set_rate_range() won't affect its rate if it is already within
* range.
*
* See (for Exynos 4210):
* https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
*/
static void
clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
{
struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
int ret;
ret = clk_set_rate_range(clk,
DUMMY_CLOCK_INIT_RATE - 1000,
DUMMY_CLOCK_INIT_RATE + 1000);
KUNIT_ASSERT_EQ(test, ret, 0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
clk_put(clk);
}
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
{}
};
/*
* Test suite for a basic, transparent, clock with a parent that is also
* such a clock. The parent's parent is registered last, while the
* parent and its child are registered in that order. The intermediate
* and leaf clocks will thus be orphan when registered, but the leaf
* clock itself will always have its parent and will never be
* reparented. Indeed, it's only orphan because its parent is.
*
* These tests exercise the behaviour of the consumer API when dealing
* with an orphan clock, and how we deal with the transition to a valid
* parent.
*/
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
.name = "clk-orphan-two-level-root-last-test",
.init = clk_orphan_two_level_root_last_test_init,
.exit = clk_orphan_two_level_root_last_test_exit,
.test_cases = clk_orphan_two_level_root_last_test_cases,
};
/*
...@@ -352,7 +1434,7 @@ static void clk_range_test_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -365,6 +1447,8 @@ static void clk_range_test_set_range(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -375,13 +1459,15 @@ static void clk_range_test_set_range_invalid(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_LT(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 + 1000,
DUMMY_CLOCK_RATE_1),
0);
clk_put(clk);
}
/*
...@@ -420,7 +1506,7 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
...@@ -433,6 +1519,8 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -443,7 +1531,7 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -460,6 +1548,8 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -472,7 +1562,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
...@@ -489,6 +1579,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
clk_put(clk);
}
/*
...@@ -499,7 +1591,7 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
...@@ -512,6 +1604,8 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -522,7 +1616,7 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -539,6 +1633,8 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -551,7 +1647,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
...@@ -568,6 +1664,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
clk_put(clk);
}
/*
...@@ -582,7 +1680,7 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -598,6 +1696,8 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
...@@ -612,7 +1712,7 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -628,6 +1728,8 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
static struct kunit_case clk_range_test_cases[] = {
...@@ -645,6 +1747,12 @@ static struct kunit_case clk_range_test_cases[] = {
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
*/
static struct kunit_suite clk_range_test_suite = {
.name = "clk-range-test",
.init = clk_test_init,
...@@ -664,7 +1772,7 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -700,6 +1808,8 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(clk);
}
/*
...@@ -714,7 +1824,7 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
...@@ -758,14 +1868,79 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
clk_put(user2);
clk_put(user1);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
* whether a new rate is needed, including when a user drops its clock.
*
* With clk_dummy_maximize_rate_ops, this means that the rate will
* trail along the maximum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
0);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
0,
DUMMY_CLOCK_RATE_2),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
0,
DUMMY_CLOCK_RATE_1),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(user2);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(user1);
clk_put(clk);
} }
static struct kunit_case clk_range_maximize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
* driver that will always try to run at the highest possible rate.
*/
static struct kunit_suite clk_range_maximize_test_suite = {
.name = "clk-range-maximize-test",
.init = clk_maximize_test_init,
...@@ -785,7 +1960,7 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
...@@ -821,6 +1996,8 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(clk);
}
/*
...@@ -835,7 +2012,7 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
...@@ -875,14 +2052,75 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
clk_put(user2);
clk_put(user1);
clk_put(clk);
}
/*
* Test that if we have several subsequent calls to
* clk_set_rate_range(), across multiple users, the core will reevaluate
* whether a new rate is needed, including when a user drops its clock.
*
* With clk_dummy_minimize_rate_ops, this means that the rate will
* trail along the minimum as it evolves.
*/
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
user1 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
user2 = clk_hw_get_clk(hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user1,
DUMMY_CLOCK_RATE_1,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_ASSERT_EQ(test,
clk_set_rate_range(user2,
DUMMY_CLOCK_RATE_2,
ULONG_MAX),
0);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
clk_put(user2);
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_put(user1);
clk_put(clk);
} }
static struct kunit_case clk_range_minimize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
{}
};
/*
* Test suite for a basic rate clock, without any parent.
*
* These tests exercise the rate range API: clk_set_rate_range(),
* clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
* driver that will always try to run at the lowest possible rate.
*/
static struct kunit_suite clk_range_minimize_test_suite = {
.name = "clk-range-minimize-test",
.init = clk_minimize_test_init,
...@@ -890,11 +2128,284 @@ static struct kunit_suite clk_range_minimize_test_suite = {
.test_cases = clk_range_minimize_test_cases,
};
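The three range suites above drive the consumer-facing rate range API. As a rough sketch of how a consumer driver might combine those calls (device, clock name and rates below are hypothetical, not taken from this series):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer; "bus" and the rates are made up for this sketch. */
static int foo_enter_fast_mode(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Constrain the clock to 100-200 MHz; the core picks a rate in range. */
	ret = clk_set_rate_range(clk, 100000000, 200000000);
	if (ret)
		return ret;

	/* Later, tighten only the lower boundary... */
	ret = clk_set_min_rate(clk, 150000000);
	if (ret)
		return ret;

	/* ...and drop the constraint entirely once fast mode is left. */
	return clk_drop_range(clk);
}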
struct clk_leaf_mux_ctx {
struct clk_multiple_parent_ctx mux_ctx;
struct clk_hw hw;
};
static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx;
const char *top_parents[2] = { "parent-0", "parent-1" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
if (ret)
return ret;
ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
if (ret)
return ret;
ctx->mux_ctx.current_parent = 0;
ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
&clk_multiple_parents_mux_ops,
0);
ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
ret = clk_hw_register(NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx = test->priv;
clk_hw_unregister(&ctx->hw);
clk_hw_unregister(&ctx->mux_ctx.hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
/*
* Test that, for a clock that will forward any rate request to its
* parent, the rate request structure returned by __clk_determine_rate
* is sane and will be what we expect.
*/
static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
{
struct clk_leaf_mux_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk_rate_request req;
unsigned long rate;
int ret;
rate = clk_get_rate(clk);
KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
ret = __clk_determine_rate(hw, &req);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
clk_put(clk);
}
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
{}
};
/*
* Test suite for a clock whose parent is a mux with multiple parents.
* The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
* requests to the mux, which will then select which parent is the best
* fit for a given rate.
*
* These tests exercise the behaviour of muxes, and the proper selection
* of parents.
*/
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
.name = "clk-leaf-mux-set-rate-parent",
.init = clk_leaf_mux_set_rate_parent_test_init,
.exit = clk_leaf_mux_set_rate_parent_test_exit,
.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
struct clk_mux_notifier_rate_change {
bool done;
unsigned long old_rate;
unsigned long new_rate;
wait_queue_head_t wq;
};
struct clk_mux_notifier_ctx {
struct clk_multiple_parent_ctx mux_ctx;
struct clk *clk;
struct notifier_block clk_nb;
struct clk_mux_notifier_rate_change pre_rate_change;
struct clk_mux_notifier_rate_change post_rate_change;
};
#define NOTIFIER_TIMEOUT_MS 100
static int clk_mux_notifier_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
struct clk_notifier_data *clk_data = data;
struct clk_mux_notifier_ctx *ctx = container_of(nb,
struct clk_mux_notifier_ctx,
clk_nb);
if (action & PRE_RATE_CHANGE) {
ctx->pre_rate_change.old_rate = clk_data->old_rate;
ctx->pre_rate_change.new_rate = clk_data->new_rate;
ctx->pre_rate_change.done = true;
wake_up_interruptible(&ctx->pre_rate_change.wq);
}
if (action & POST_RATE_CHANGE) {
ctx->post_rate_change.old_rate = clk_data->old_rate;
ctx->post_rate_change.new_rate = clk_data->new_rate;
ctx->post_rate_change.done = true;
wake_up_interruptible(&ctx->post_rate_change.wq);
}
return 0;
}
static int clk_mux_notifier_test_init(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx;
const char *top_parents[2] = { "parent-0", "parent-1" };
int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
test->priv = ctx;
ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
init_waitqueue_head(&ctx->pre_rate_change.wq);
init_waitqueue_head(&ctx->post_rate_change.wq);
ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
if (ret)
return ret;
ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
&clk_dummy_rate_ops,
0);
ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
if (ret)
return ret;
ctx->mux_ctx.current_parent = 0;
ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
&clk_multiple_parents_mux_ops,
0);
ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
if (ret)
return ret;
ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
if (ret)
return ret;
return 0;
}
static void clk_mux_notifier_test_exit(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx = test->priv;
struct clk *clk = ctx->clk;
clk_notifier_unregister(clk, &ctx->clk_nb);
clk_put(clk);
clk_hw_unregister(&ctx->mux_ctx.hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
/*
* Test that if we have a notifier registered on a mux, the core
* will notify us when we switch to another parent, and with the proper
* old and new rates.
*/
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
struct clk_mux_notifier_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->mux_ctx.hw;
struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
int ret;
ret = clk_set_parent(clk, new_parent);
KUNIT_ASSERT_EQ(test, ret, 0);
ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
ctx->pre_rate_change.done,
msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
ctx->post_rate_change.done,
msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
KUNIT_ASSERT_GT(test, ret, 0);
KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
clk_put(new_parent);
clk_put(clk);
}
static struct kunit_case clk_mux_notifier_test_cases[] = {
KUNIT_CASE(clk_mux_notifier_set_parent_test),
{}
};
/*
* Test suite for a mux with multiple parents, and a notifier registered
* on the mux.
*
* These tests exercise the behaviour of notifiers.
*/
static struct kunit_suite clk_mux_notifier_test_suite = {
.name = "clk-mux-notifier",
.init = clk_mux_notifier_test_init,
.exit = clk_mux_notifier_test_exit,
.test_cases = clk_mux_notifier_test_cases,
};
kunit_test_suites(
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
&clk_mux_notifier_test_suite,
&clk_orphan_transparent_multiple_parent_mux_test_suite,
&clk_orphan_transparent_single_parent_test_suite,
&clk_orphan_two_level_root_last_test_suite,
&clk_range_test_suite,
&clk_range_maximize_test_suite,
&clk_range_minimize_test_suite,
&clk_single_parent_mux_test_suite,
&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
...@@ -129,9 +129,18 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
...@@ -141,6 +150,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.is_enabled = mtk_clk_mux_is_enabled,
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
...
...@@ -915,6 +915,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
req->best_parent_hw = p2;
}
clk_hw_get_rate_range(req->best_parent_hw,
&parent_req.min_rate, &parent_req.max_rate);
if (req->min_rate > parent_req.min_rate)
parent_req.min_rate = req->min_rate;
if (req->max_rate < parent_req.max_rate)
parent_req.max_rate = req->max_rate;
ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
if (ret)
return ret;
...
...@@ -42,6 +42,8 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
* Should be initialized by calling clk_hw_init_rate_request().
*
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
...@@ -60,6 +62,15 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
void clk_hw_init_rate_request(const struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long rate);
void clk_hw_forward_rate_request(const struct clk_hw *core,
const struct clk_rate_request *old_req,
const struct clk_hw *parent,
struct clk_rate_request *req,
unsigned long parent_rate);
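As a rough illustration of how a provider might use these helpers (a hypothetical fixed divide-by-2 clock, not code from this series): the child forwards the request it received to its parent at twice the rate, then copies the result back into its own request.

#include <linux/clk-provider.h>

/* Hypothetical .determine_rate for a fixed divide-by-2 clock. */
static int foo_div2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	struct clk_rate_request parent_req;
	int ret;

	if (!parent)
		return -EINVAL;

	/* Build a request for the parent, carrying over our boundaries. */
	clk_hw_forward_rate_request(hw, req, parent, &parent_req,
				    req->rate * 2);

	ret = __clk_determine_rate(parent, &parent_req);
	if (ret)
		return ret;

	req->best_parent_hw = parent;
	req->best_parent_rate = parent_req.rate;
	req->rate = parent_req.rate / 2;

	return 0;
}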
/**
* struct clk_duty - Struture encoding the duty cycle ratio of a clock
*
...@@ -118,8 +129,9 @@ struct clk_duty {
*
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call. If the
* driver cannot figure out a rate for this clock, it must return
* 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
...@@ -1303,6 +1315,8 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
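The amended @recalc_rate rule above (return 0 when the rate cannot be determined) could look roughly like this in a driver; the struct, register offset and mask are made up for the sketch.

#include <linux/clk-provider.h>
#include <linux/container_of.h>
#include <linux/io.h>

#define FOO_DIV_REG	0x0	/* hypothetical register layout */
#define FOO_DIV_MASK	0xf

struct foo_clk {
	void __iomem *base;
	struct clk_hw hw;
};

/* Hypothetical .recalc_rate: return 0 when the hardware state cannot
 * be turned into a rate, as the documentation now requires. */
static unsigned long foo_clk_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct foo_clk *foo = container_of(hw, struct foo_clk, hw);
	u32 div = readl(foo->base + FOO_DIV_REG) & FOO_DIV_MASK;

	if (!div)
		return 0;

	return parent_rate / div;
}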
...
...@@ -799,7 +799,7 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
...