core.c 84.9 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
2 3 4 5 6 7 8 9 10
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

11 12
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

Viresh Kumar's avatar
Viresh Kumar committed
13
#include <linux/clk.h>
14 15
#include <linux/errno.h>
#include <linux/err.h>
16
#include <linux/device.h>
17
#include <linux/export.h>
18
#include <linux/pm_domain.h>
19
#include <linux/regulator/consumer.h>
20 21
#include <linux/slab.h>
#include <linux/xarray.h>
22

23
#include "opp.h"
24 25

/*
26 27
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
28 29
 * various states of availability.
 */
30
LIST_HEAD(opp_tables);
31

32
/* Lock to allow exclusive modification to the device and opp lists */
33
DEFINE_MUTEX(opp_table_lock);
34 35
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
36

37 38 39
/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

40
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
41
{
42
	struct opp_device *opp_dev;
43
	bool found = false;
44

45
	mutex_lock(&opp_table->lock);
46
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
47 48 49 50
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}
51

52 53
	mutex_unlock(&opp_table->lock);
	return found;
54 55
}

56
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
57 58 59 60
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
61
		if (_find_opp_dev(dev, opp_table)) {
62 63 64 65 66 67 68 69
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

70
/**
71 72
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
73
 *
74
 * Search OPP table for one containing matching device.
75
 *
76
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
77 78
 * -EINVAL based on type of error.
 *
79
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
80
 */
81
struct opp_table *_find_opp_table(struct device *dev)
82
{
83
	struct opp_table *opp_table;
84

85
	if (IS_ERR_OR_NULL(dev)) {
86 87 88 89
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

90 91 92
	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);
93

94
	return opp_table;
95 96
}

97 98 99 100 101 102 103 104 105 106 107 108
/*
 * Returns true if multiple clocks aren't there, else returns false with WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in an platform specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

109
/**
110
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
111 112
 * @opp:	opp for which voltage has to be returned for
 *
113
 * Return: voltage in micro volt corresponding to the opp, else
114 115
 * return 0
 *
116
 * This is useful only for devices with single power supply.
117
 */
118
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
119
{
120
	if (IS_ERR_OR_NULL(opp)) {
121
		pr_err("%s: Invalid parameters\n", __func__);
122 123
		return 0;
	}
124

125
	return opp->supplies[0].u_volt;
126
}
127
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
128

129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153
/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp:	opp for which voltage has to be returned for
 * @supplies:	Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure the @supplies array must contain space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);

154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp:	opp for which power has to be returned for
 *
 * Return: power in micro watt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long total_uw = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* Sum the contribution of every regulator backing this OPP */
	for (i = 0; i < opp->opp_table->regulator_count; i++)
		total_uw += opp->supplies[i].u_watt;

	return total_uw;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198
/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with specified index
 * @opp: opp for which frequency has to be returned for
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with specified index,
 * else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	/* Short-circuit keeps the clk_count dereference safe for bad @opp */
	if (!IS_ERR_OR_NULL(opp) && index < opp->opp_table->clk_count)
		return opp->rates[index];

	pr_err("%s: Invalid parameters\n", __func__);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp:	opp for which level value has to be returned for
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (!IS_ERR_OR_NULL(opp) && opp->available)
		return opp->level;

	pr_err("%s: Invalid parameters\n", __func__);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

217 218 219 220 221 222 223 224 225 226 227 228 229
/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *                                    corresponding to an available opp
 * @opp:	opp for which performance state has to be returned for
 * @index:	index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	/* Validate the OPP, its availability and the index range first */
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	/* Level of the linked required OPP is the performance state */
	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

249 250 251 252 253 254 255 256 257 258 259 260
/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short duration of times to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
261
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
262 263 264 265
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

266
	return opp->turbo;
267 268 269
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

270 271 272 273 274 275 276 277
/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
278
	struct opp_table *opp_table;
279 280
	unsigned long clock_latency_ns;

281 282
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
283 284 285 286 287
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);
288 289 290 291 292

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

293 294 295 296 297 298 299 300
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	/* Per-regulator min/max voltage envelope over all available OPPs */
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	/* Allocation failure is treated as "no latency information" */
	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	/* Compute the widest voltage range each regulator may be asked for */
	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		/* regulator_set_voltage_time() returns time in microseconds */
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	unsigned long volt_ns = dev_pm_opp_get_max_volt_latency(dev);
	unsigned long clk_ns = dev_pm_opp_get_max_clock_latency(dev);

	return volt_ns + clk_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);

378
/**
379
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
380 381
 * @dev:	device for which we do this operation
 *
382 383
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0;
384
 */
385
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
386
{
387
	struct opp_table *opp_table;
388
	unsigned long freq = 0;
389

390
	opp_table = _find_opp_table(dev);
391 392
	if (IS_ERR(opp_table))
		return 0;
393

394 395 396 397
	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);
398

399
	return freq;
400
}
401
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
402

403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419
/* Count the available OPPs in @opp_table, under the table lock. */
int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int nr_available = 0;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			nr_available++;
	}
	mutex_unlock(&opp_table->lock);

	return nr_available;
}

420
/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *table;
	int count;

	table = _find_opp_table(dev);
	if (IS_ERR(table)) {
		/* Missing table is not fatal for callers; just log it */
		count = PTR_ERR(table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(table);
	dev_pm_opp_put_opp_table(table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
446

447 448 449
/* Helpers to read keys */

/* Key reader: frequency (Hz) of the OPP's @index'th clock. */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

453 454 455 456 457
/* Key reader: performance level of the OPP; @index is unused. */
static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

458 459 460 461 462
/* Key reader: peak bandwidth of the OPP's @index'th interconnect path. */
static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500
/* Generic comparison helpers */

/* Exact match: record @temp_opp and stop the scan only when keys are equal. */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key != key)
		return false;

	*opp = temp_opp;
	return true;
}

/* Ceil match: first OPP whose key is >= @key ends the (ascending) scan. */
static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key < key)
		return false;

	*opp = temp_opp;
	return true;
}

/* Floor match: keep the last OPP whose key is <= @key, stop once we pass it. */
static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key <= key) {
		/* Still within range: remember it and keep scanning */
		*opp = temp_opp;
		return false;
	}

	return true;
}

/* Generic key finding helpers */

/*
 * Core search: scan @opp_table's list for an OPP whose key (as produced by
 * @read) satisfies @compare against *key, considering only OPPs whose
 * availability equals @available. On success *key is refreshed to the
 * matched value and the OPP's reference count is elevated; the caller must
 * drop it with dev_pm_opp_put(). @assert, when non-NULL, is a precondition
 * check on the table (e.g. assert_single_clk()).
 */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table))
		return ERR_PTR(-EINVAL);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/*
 * Device-level wrapper around _opp_table_find_key(): resolves the OPP table
 * for @dev, runs the search and drops the table reference again.
 */
static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table))
{
	struct opp_table *table;
	struct dev_pm_opp *found;

	table = _find_opp_table(dev);
	if (IS_ERR(table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(table));
		return ERR_CAST(table);
	}

	found = _opp_table_find_key(table, key, index, available, read,
				    compare, assert);

	dev_pm_opp_put_opp_table(table);

	return found;
}

/* Exact-match search on @dev; @key is taken by value as the result is unused. */
static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

/* Table-level ceil search: smallest key >= *key; *key is updated on success. */
static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

/* Device-level ceil search: smallest key >= *key; *key is updated on success. */
static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

/* Device-level floor search: largest key <= *key; *key is updated on success. */
static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

595
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Note: Only valid for single-clock OPP tables (see assert_single_clk());
 * use dev_pm_opp_find_freq_exact_indexed() for multi-clock tables.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
		unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
625

626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653
/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					 clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	frequency to search for
 * @index:	Clock index
 * @available:	true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	/* No single-clock assertion here: any valid clock index is allowed */
	return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

654 655 656
/* Ceil search on frequency, valid for single-clock OPP tables only. */
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

661
/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
685

686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713
/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	/* No single-clock assertion here: any valid clock index is allowed */
	return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

714
/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
738

739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766
/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	/* No single-clock assertion here: any valid clock index is allowed */
	return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784
/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	/* Levels apply to the whole OPP, so no per-clock assertion is needed */
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for an rounded up level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for rounded up match in the opp table and returns pointer
 * to the  matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
807 808
	unsigned long temp = *level;
	struct dev_pm_opp *opp;
809

810
	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
811
	*level = temp;
812 813 814 815
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);

816 817 818
/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev:	device for which we do this operation
Yang Li's avatar
Yang Li committed
819
 * @bw:	start bandwidth
820 821 822 823 824 825 826 827 828 829 830 831 832 833 834
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
835 836
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
837
{
838 839
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;
840

841
	opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
842
	*bw = temp;
843 844 845 846 847 848 849
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev:	device for which we do this operation
Yang Li's avatar
Yang Li committed
850
 * @bw:	start bandwidth
851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
869 870
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;
871

872
	opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
873
	*bw = temp;
874 875 876 877
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

878
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
879
			    struct dev_pm_opp_supply *supply)
880 881 882 883 884 885 886 887 888 889
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

890 891
	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
892

893 894
	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
895 896
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
897 898
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);
899 900 901 902

	return ret;
}

903 904 905
/*
 * Configure the single clock of @opp_table, taking the rate either from the
 * explicit @data target (an unsigned long *) or from @opp's first rate.
 */
static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int err;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	err = clk_set_rate(opp_table->clk, freq);
	if (err) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			err);
		return err;
	}

	/* Remember the rate we just programmed successfully */
	opp_table->rate_clk_single = freq;

	return 0;
}

932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961
/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * the reverse order while scaling down.
 *
 * Return: 0 on success, or the first clk_set_rate() error otherwise (clocks
 * already programmed before the failure are left at their new rates).
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i, first, last, step;

	/*
	 * Express the walk direction as a stride instead of duplicating the
	 * loop body for the up/down cases (the two copies previously had to
	 * be kept in sync by hand).
	 */
	if (scaling_down) {
		first = opp_table->clk_count - 1;
		last = -1;
		step = -1;
	} else {
		first = 0;
		last = opp_table->clk_count;
		step = 1;
	}

	for (i = first; i != last; i += step) {
		ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
		if (ret) {
			dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
				ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);

966 967 968
static int _opp_config_regulator_single(struct device *dev,
			struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
			struct regulator **regulators, unsigned int count)
969
{
970
	struct regulator *reg = regulators[0];
971 972 973
	int ret;

	/* This function only supports single regulator per device */
974
	if (WARN_ON(count > 1)) {
975 976 977 978
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

979
	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
980
	if (ret)
981
		return ret;
982

983 984 985 986
	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
987
	if (unlikely(!new_opp->opp_table->enabled)) {
988 989 990 991 992
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

993 994 995
	return 0;
}

996
static int _set_opp_bw(const struct opp_table *opp_table,
997
		       struct dev_pm_opp *opp, struct device *dev)
998 999 1000 1001 1002 1003 1004 1005
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
1006
		if (!opp) {
1007 1008 1009 1010 1011 1012 1013 1014 1015
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
1016
				opp ? "set" : "remove", i, ret);
1017 1018 1019 1020 1021 1022 1023
			return ret;
		}
	}

	return 0;
}

1024 1025
static int _set_performance_state(struct device *dev, struct device *pd_dev,
				  struct dev_pm_opp *opp, int i)
1026
{
1027
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0;
1028 1029 1030 1031 1032 1033 1034
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
Viresh Kumar's avatar
Viresh Kumar committed
1035
		dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
1036 1037 1038 1039 1040 1041
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

1042 1043 1044 1045 1046 1047 1048 1049 1050
/*
 * Stub handler used when the required-opp tables are not backed by genpd:
 * propagating required OPPs is only implemented for genpd devices, so this
 * always fails with -ENOENT.
 */
static int _opp_set_required_opps_generic(struct device *dev,
	struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
{
	dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n");
	return -ENOENT;
}

/*
 * Propagate @opp's required OPPs as genpd performance-state votes. Votes are
 * applied in list order while scaling up and in reverse while scaling down.
 */
static int _opp_set_required_opps_genpd(struct device *dev,
	struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
{
	struct device **genpd_virt_devs =
		opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
	int i, idx, ret = 0;

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Walk forward when scaling up, mirror the index when scaling down */
	for (i = 0; i < opp_table->required_opp_count; i++) {
		idx = scaling_down ? opp_table->required_opp_count - i - 1 : i;

		ret = _set_performance_state(dev, genpd_virt_devs[idx], opp, idx);
		if (ret)
			break;
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109
/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* No handler installed yet: nothing to propagate */
	if (!opp_table->set_required_opps)
		return 0;

	return opp_table->set_required_opps(dev, opp_table, opp, up);
}

/* Install the set_required_opps handler once the table kind is known */
void _update_set_required_opps(struct opp_table *opp_table)
{
	/* Keep a previously installed handler */
	if (opp_table->set_required_opps)
		return;

	/* All required OPPs will belong to genpd or none */
	opp_table->set_required_opps =
		opp_table->required_opp_tables[0]->is_genpd ?
		_opp_set_required_opps_genpd : _opp_set_required_opps_generic;
}

1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134
/* Resolve and cache opp_table->current_opp for @dev */
static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long rate;

	/* Try to match the current clock rate to an OPP first */
	if (!IS_ERR(opp_table->clk)) {
		rate = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &rate);
	}

	/*
	 * Couldn't resolve the current OPP from the clock rate? Fall back to
	 * the first entry of the (ascending) list, so the rest of the code
	 * never has to special-case an unknown current_opp.
	 */
	if (IS_ERR(opp)) {
		mutex_lock(&opp_table->lock);
		opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
		dev_pm_opp_get(opp);
		mutex_unlock(&opp_table->lock);
	}

	opp_table->current_opp = opp;
}

1135
static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

1150
	ret = _set_opp_bw(opp_table, NULL, dev);
1151 1152 1153 1154 1155 1156
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

1157
	ret = _set_required_opps(dev, opp_table, NULL, false);
1158 1159 1160 1161 1162

	opp_table->enabled = false;
	return ret;
}

1163
/*
 * Switch @dev from its current OPP to @opp.
 *
 * The ordering is deliberate: when scaling up, required OPPs, bandwidth and
 * regulators are configured *before* the clock; when scaling down, the clock
 * is changed first and the rest follows in reverse order. A NULL @opp
 * disables the table entirely. @forced makes the switch happen even when the
 * target OPP equals the current one.
 */
static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	/*
	 * _opp_compare_key() returns 1 (old > new: scaling down), 0 (equal) or
	 * -1 (scaling up); fold "up" into 0 so the flag reads as a boolean.
	 */
	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	dev_pm_opp_get(opp);
	opp_table->current_opp = opp;

	/* ret is 0 here: every failure path above returned early */
	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). Device wanting to run at fmax
 * provided by the opp, should have already rounded to the target OPP's
 * frequency.
 *
 * A zero @target_freq disables the OPP table for the device.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq = 0, temp_freq;
	struct dev_pm_opp *opp = NULL;
	bool forced = false;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate()
		 */
		if (!_get_opp_count(opp_table)) {
			/* NOTE(review): assumes config_clks is non-NULL here — verify the clk-less (-ENOENT) setup path */
			ret = opp_table->config_clks(dev, opp_table, NULL,
						     &target_freq, false);
			goto put_opp_table;
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, don't update the frequency we
		 * pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
				__func__, freq, ret);
			goto put_opp_table;
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * same as the old one, we may still reach here for a different
		 * value of the frequency. In such a case, do not abort but
		 * configure the hardware to the desired frequency forcefully.
		 */
		forced = opp_table->rate_clk_single != target_freq;
	}

	ret = _set_opp(dev, opp_table, opp, &target_freq, forced);

	/* Balance the reference on @opp taken while looking it up above */
	if (target_freq)
		dev_pm_opp_put(opp);

put_opp_table:
	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);

1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359
/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine. A NULL @opp disables the OPP table for the device.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	/* No clk data and not forced: apply @opp as-is */
	ret = _set_opp(dev, opp_table, opp, NULL, false);
	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);

1367 1368 1369
/* OPP-dev Helpers */
/* Detach @opp_dev from @opp_table's device list and free it */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	/* Remove the debugfs entries before freeing the device entry */
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

1376 1377
struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
1378
{
1379
	struct opp_device *opp_dev;
1380

1381 1382
	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
1383 1384
		return NULL;

1385 1386
	/* Initialize opp-dev */
	opp_dev->dev = dev;
1387

1388
	mutex_lock(&opp_table->lock);
1389
	list_add(&opp_dev->node, &opp_table->dev_list);
1390
	mutex_unlock(&opp_table->lock);
1391

1392
	/* Create debugfs entries for the opp_table */
1393
	opp_debug_register(opp_dev, opp_table);
1394 1395 1396 1397

	return opp_dev;
}

1398
/*
 * Allocate and initialize a fresh opp_table for @dev, attach the device to it
 * and parse its DT/interconnect properties. Returns the table or ERR_PTR().
 */
static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	/* No clk yet; _update_opp_table_clk() may find one later */
	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		/* Any other error is non-fatal: warn and carry on */
		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	/* Unwind in reverse order of construction */
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

1456
/* Take a reference on @opp_table; balanced by dev_pm_opp_put_opp_table() */
void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

1461 1462 1463 1464
/*
 * Optionally acquire the device's clk for @opp_table. Returns the table
 * (possibly untouched) on success, or ERR_PTR() after dropping the table
 * reference on a hard clk lookup failure.
 */
static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	switch (ret) {
	case 0:
		/* Single clk managed by the OPP core from now on */
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;

	case -ENOENT:
		/*
		 * There are few platforms which don't want the OPP core to
		 * manage device's clock settings. In such cases neither the
		 * platform provides the clks explicitly to us, nor the DT
		 * contains a valid clk entry. The OPP nodes in DT may still
		 * contain "opp-hz" property though, which we need to parse and
		 * allow the platform to find an OPP based on freq later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;

	default:
		dev_pm_opp_put_opp_table(opp_table);
		dev_err_probe(dev, ret, "Couldn't find clock\n");
		return ERR_PTR(ret);
	}
}

1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to the same. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * And for that reason we have to go for a bit tricky implementation here, which
 * uses the opp_tables_busy flag to indicate if another creator is in the middle
 * of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
					 bool getclk)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	/* Fast path: the device already has a table (reference taken) */
	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		/* Shared (managed) table found: just attach this device */
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	/* Let waiters spinning on "again" proceed */
	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return _update_opp_table_clk(dev, opp_table, getclk);
}
1575

1576
/* Non-indexed variant of _add_opp_table_indexed() (index 0) */
static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
	return _add_opp_table_indexed(dev, 0, getclk);
}
1580

1581
/**
 * dev_pm_opp_get_opp_table() - Find the OPP table of a device
 * @dev: device whose OPP table is required
 *
 * Takes a reference on the table; balance with dev_pm_opp_put_opp_table().
 *
 * Return: the table, or ERR_PTR(-ENODEV) if the device has none.
 */
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
1586

1587
/*
 * Final teardown of an OPP table; runs on the last dev_pm_opp_put_opp_table().
 * Entered with opp_table_lock held (via kref_put_mutex()) and releases that
 * lock itself once the table is off the global list.
 */
static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	/* Release the cached current OPP, if one was resolved */
	if (opp_table->current_opp)
		dev_pm_opp_put(opp_table->current_opp);

	_of_clear_opp_table(opp_table);

	/* Release automatically acquired single clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	/* Release the interconnect paths and their array */
	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	/* All OPPs must have been removed before the last table reference */
	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
		_remove_opp_dev(opp_dev, opp_table);

	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

/*
 * Drop a reference on @opp_table. On the last put, _opp_table_kref_release()
 * runs with opp_table_lock held (kref_put_mutex) and frees the table.
 */
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

1629
/* Free an OPP allocated by _opp_allocate() that was never added to a table */
void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

1634
/*
 * Final teardown of an OPP; runs on the last dev_pm_opp_put(). Entered with
 * opp->opp_table->lock held (via kref_put_mutex()) and drops that lock itself
 * before the debugfs/notifier work, which must run outside the lock.
 */
static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_clear_opp(opp_table, opp);
	opp_debug_remove_one(opp);
	kfree(opp);
}
1651

1652
/* Take a reference on @opp; balanced by dev_pm_opp_put() */
void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

1657 1658
/*
 * Drop a reference on @opp. On the last put, _opp_kref_release() runs with
 * the owning table's lock held (kref_put_mutex) and frees the OPP.
 */
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

1663
/**
 * dev_pm_opp_remove()  - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp = NULL, *iter;
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	/* Matching on rates[0] only makes sense with a single clk */
	if (!assert_single_clk(opp_table))
		goto put_table;

	mutex_lock(&opp_table->lock);

	/* Find the OPP whose first rate matches @freq */
	list_for_each_entry(iter, &opp_table->opp_list, node) {
		if (iter->rates[0] == freq) {
			opp = iter;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (opp) {
		/* Drop the OPP's own reference; frees it on the last put */
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

put_table:
	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

1709 1710 1711 1712 1713 1714 1715
/*
 * Return the first not-yet-removed OPP of the requested kind (dynamic or
 * static) from @opp_table, or NULL when none is left.
 */
static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *found = NULL, *opp;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/*
		 * Refcount must be dropped only once for each OPP by OPP core,
		 * do that with help of "removed" flag.
		 */
		if (opp->removed || opp->dynamic != dynamic)
			continue;

		found = opp;
		break;
	}
	mutex_unlock(&opp_table->lock);

	return found;
}

1730 1731 1732 1733 1734 1735
/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen lock less to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		/* Mark it so the core drops each OPP's reference only once */
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
1751 1752
	mutex_lock(&opp_table->lock);

1753
	if (!opp_table->parsed_static_opps) {
1754 1755
		mutex_unlock(&opp_table->lock);
		return false;
1756 1757
	}

1758 1759 1760
	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
1761 1762 1763
	}

	mutex_unlock(&opp_table->lock);
1764

1765
	_opp_remove_all(opp_table, false);
1766
	return true;
1767 1768
}

1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782
/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev:	device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

1783
	_opp_remove_all(opp_table, true);
1784 1785 1786 1787 1788 1789

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

1790
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
1791
{
1792
	struct dev_pm_opp *opp;
1793
	int supply_count, supply_size, icc_size, clk_size;
1794

1795
	/* Allocate space for at least one supply */
1796 1797
	supply_count = opp_table->regulator_count > 0 ?
			opp_table->regulator_count : 1;
1798
	supply_size = sizeof(*opp->supplies) * supply_count;
1799
	clk_size = sizeof(*opp->rates) * opp_table->clk_count;
1800
	icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
1801

1802
	/* allocate new OPP node and supplies structures */
1803
	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
1804
	if (!opp)
1805 1806
		return NULL;

1807
	/* Put the supplies, bw and clock at the end of the OPP structure */
1808
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
1809 1810 1811

	opp->rates = (unsigned long *)(opp->supplies + supply_count);

1812
	if (icc_size)
1813 1814
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);

1815 1816
	INIT_LIST_HEAD(&opp->node);

1817 1818 1819
	return opp;
}

1820
static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
1821
					 struct opp_table *opp_table)
1822
{
1823 1824 1825
	struct regulator *reg;
	int i;

1826 1827 1828
	if (!opp_table->regulators)
		return true;

1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839
	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
1840 1841 1842 1843 1844
	}

	return true;
}

1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858
/* Lexicographically compare the clock rates of two OPPs: -1/0/1 */
static int _opp_compare_rate(struct opp_table *opp_table,
			     struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	int i;

	/* Order by the first differing clock rate */
	for (i = 0; i < opp_table->clk_count; i++) {
		if (opp1->rates[i] < opp2->rates[i])
			return -1;
		if (opp1->rates[i] > opp2->rates[i])
			return 1;
	}

	/* Same rates for both OPPs */
	return 0;
}

1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872
/* Lexicographically compare the peak bandwidths of two OPPs: -1/0/1 */
static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
			   struct dev_pm_opp *opp2)
{
	int i;

	/* Order by the first differing peak bandwidth */
	for (i = 0; i < opp_table->path_count; i++) {
		if (opp1->bandwidth[i].peak < opp2->bandwidth[i].peak)
			return -1;
		if (opp1->bandwidth[i].peak > opp2->bandwidth[i].peak)
			return 1;
	}

	/* Same bw for both OPPs */
	return 0;
}

1873 1874 1875 1876 1877 1878
/*
 * Returns
 * 0: opp1 == opp2
 * 1: opp1 > opp2
 * -1: opp1 < opp2
 */
1879 1880
int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
		     struct dev_pm_opp *opp2)
1881
{
1882 1883 1884 1885 1886 1887
	int ret;

	ret = _opp_compare_rate(opp_table, opp1, opp2);
	if (ret)
		return ret;

1888 1889 1890
	ret = _opp_compare_bw(opp_table, opp1, opp2);
	if (ret)
		return ret;
1891

1892 1893
	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;
1894 1895

	/* Duplicate OPPs */
1896 1897 1898
	return 0;
}

1899 1900 1901
/*
 * Locate the insertion point for @new_opp in @opp_table's ascending opp_list.
 * On a 0 return, *head points at the node to list_add() the new OPP after.
 * Duplicates return -EBUSY or -EEXIST (see the comment above _opp_add()).
 */
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;
	int opp_cmp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
		if (opp_cmp > 0) {
			/* Still smaller than the new OPP: keep advancing */
			*head = &opp->node;
			continue;
		}

		if (opp_cmp < 0)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rates[0], opp->supplies[0].u_volt,
			 opp->available, new_opp->rates[0],
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

1938 1939 1940 1941 1942 1943 1944 1945 1946 1947
/* Mark @opp unavailable when any of its first @count required OPPs isn't */
void _required_opps_available(struct dev_pm_opp *opp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!opp->required_opps[i]->available) {
			opp->available = false;
			pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
				__func__, opp->required_opps[i]->np, opp->rates[0]);
			return;
		}
	}
}

1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963
/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	/* Reject duplicates and find the sorted insertion point */
	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
	if (ret) {
		mutex_unlock(&opp_table->lock);
		return ret;
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	opp_debug_create_one(new_opp, opp_table);

	/* Keep the OPP but mark it unusable if the regulators can't supply it */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rates[0]);
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return 0;

	_required_opps_available(new_opp, opp_table->required_opp_count);

	return 0;
}

2001
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @data:	The OPP data for the OPP to add
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		struct dev_pm_opp_data *data, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol, u_volt = data->u_volt;
	int ret;

	/* v1 bindings describe a single frequency (rates[0]) per OPP */
	if (!assert_single_clk(opp_table))
		return -EINVAL;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rates[0] = data->freq;
	new_opp->level = data->level;
	/* Derive min/max supply voltages from the configured tolerance (%) */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}
2066

2067
/**
2068
 * _opp_set_supported_hw() - Set supported platforms
2069 2070 2071 2072 2073 2074 2075 2076 2077
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
2078 2079
static int _opp_set_supported_hw(struct opp_table *opp_table,
				 const u32 *versions, unsigned int count)
2080
{
2081 2082
	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->supported_hw)
2083
		return 0;
2084

2085
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
2086
					GFP_KERNEL);
2087 2088
	if (!opp_table->supported_hw)
		return -ENOMEM;
2089

2090
	opp_table->supported_hw_count = count;
2091

2092
	return 0;
2093 2094 2095
}

/**
2096 2097
 * _opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by _opp_set_supported_hw().
2098 2099
 *
 * This is required only for the V2 bindings, and is called for a matching
2100
 * _opp_set_supported_hw(). Until this is called, the opp_table structure
2101 2102
 * will not be freed.
 */
2103
static void _opp_put_supported_hw(struct opp_table *opp_table)
2104
{
2105 2106 2107 2108 2109
	if (opp_table->supported_hw) {
		kfree(opp_table->supported_hw);
		opp_table->supported_hw = NULL;
		opp_table->supported_hw_count = 0;
	}
2110 2111
}

2112
/**
2113
 * _opp_set_prop_name() - Set prop-extn name
2114
 * @dev: Device for which the prop-name has to be set.
2115 2116 2117 2118 2119 2120 2121
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 */
2122
static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
2123
{
2124
	/* Another CPU that shares the OPP table has set the property ? */
2125
	if (!opp_table->prop_name) {
2126 2127 2128
		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
		if (!opp_table->prop_name)
			return -ENOMEM;
2129 2130
	}

2131
	return 0;
2132 2133 2134
}

/**
2135 2136
 * _opp_put_prop_name() - Releases resources blocked for prop-name
 * @opp_table: OPP table returned by _opp_set_prop_name().
2137 2138
 *
 * This is required only for the V2 bindings, and is called for a matching
2139
 * _opp_set_prop_name(). Until this is called, the opp_table structure
2140 2141
 * will not be freed.
 */
2142
static void _opp_put_prop_name(struct opp_table *opp_table)
2143
{
2144 2145 2146 2147
	if (opp_table->prop_name) {
		kfree(opp_table->prop_name);
		opp_table->prop_name = NULL;
	}
2148 2149
}

2150
/**
2151
 * _opp_set_regulators() - Set regulator names for the device
2152
 * @dev: Device for which regulator name is being set.
2153 2154
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
2155 2156
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
2157 2158
 * device's regulators, as the core would be required to switch voltages as
 * well.
2159 2160 2161
 *
 * This must be called before any OPPs are initialized for the device.
 */
2162 2163
static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
			       const char * const names[])
2164
{
2165
	const char * const *temp = names;
2166
	struct regulator *reg;
2167 2168 2169 2170 2171 2172 2173
	int count = 0, ret, i;

	/* Count number of regulators */
	while (*temp++)
		count++;

	if (!count)
2174
		return -EINVAL;
2175

2176 2177
	/* Another CPU that shares the OPP table has set the regulators ? */
	if (opp_table->regulators)
2178
		return 0;
2179 2180 2181 2182

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
2183 2184
	if (!opp_table->regulators)
		return -ENOMEM;
2185

2186 2187 2188
	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
2189 2190 2191
			ret = dev_err_probe(dev, PTR_ERR(reg),
					    "%s: no regulator (%s) found\n",
					    __func__, names[i]);
2192 2193 2194 2195 2196 2197 2198
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;
2199

2200 2201 2202 2203
	/* Set generic config_regulators() for single regulators here */
	if (count == 1)
		opp_table->config_regulators = _opp_config_regulator_single;

2204
	return 0;
2205

2206
free_regulators:
2207 2208
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);
2209 2210 2211

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
2212
	opp_table->regulator_count = -1;
2213

2214
	return ret;
2215 2216 2217
}

/**
2218 2219
 * _opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from _opp_set_regulators().
2220
 */
2221
static void _opp_put_regulators(struct opp_table *opp_table)
2222
{
2223 2224
	int i;

2225
	if (!opp_table->regulators)
2226
		return;
2227

2228
	if (opp_table->enabled) {
2229 2230 2231 2232
		for (i = opp_table->regulator_count - 1; i >= 0; i--)
			regulator_disable(opp_table->regulators[i]);
	}

2233
	for (i = opp_table->regulator_count - 1; i >= 0; i--)
2234 2235 2236 2237
		regulator_put(opp_table->regulators[i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
2238
	opp_table->regulator_count = -1;
2239 2240
}

2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251
/* Release the first @count clocks of the table and free the array. */
static void _put_clks(struct opp_table *opp_table, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
	opp_table->clks = NULL;
}

2252
/**
2253 2254 2255 2256 2257 2258 2259 2260 2261
 * _opp_set_clknames() - Set clk names for the device
 * @dev: Device for which clk names is being set.
 * @names: Clk names.
 *
 * In order to support OPP switching, OPP layer needs to get pointers to the
 * clocks for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact names of the clks to
 * use.
2262 2263 2264
 *
 * This must be called before any OPPs are initialized for the device.
 */
2265
static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
2266 2267
			     const char * const names[],
			     config_clks_t config_clks)
2268
{
2269
	const char * const *temp = names;
2270 2271
	int count = 0, ret, i;
	struct clk *clk;
2272

2273 2274 2275
	/* Count number of clks */
	while (*temp++)
		count++;
2276

2277 2278 2279 2280 2281 2282 2283
	/*
	 * This is a special case where we have a single clock, whose connection
	 * id name is NULL, i.e. first two entries are NULL in the array.
	 */
	if (!count && !names[1])
		count = 1;

2284
	/* Fail early for invalid configurations */
2285
	if (!count || (!config_clks && count > 1))
2286
		return -EINVAL;
2287

2288
	/* Another CPU that shares the OPP table has set the clkname ? */
2289
	if (opp_table->clks)
2290
		return 0;
2291

2292 2293 2294 2295
	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
	if (!opp_table->clks)
		return -ENOMEM;
2296

2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307
	/* Find clks for the device */
	for (i = 0; i < count; i++) {
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
			goto free_clks;
		}

		opp_table->clks[i] = clk;
2308 2309
	}

2310
	opp_table->clk_count = count;
2311
	opp_table->config_clks = config_clks;
2312 2313 2314

	/* Set generic single clk set here */
	if (count == 1) {
2315 2316
		if (!opp_table->config_clks)
			opp_table->config_clks = _opp_config_clk_single;
2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331

		/*
		 * We could have just dropped the "clk" field and used "clks"
		 * everywhere. Instead we kept the "clk" field around for
		 * following reasons:
		 *
		 * - avoiding clks[0] everywhere else.
		 * - not running single clk helpers for multiple clk usecase by
		 *   mistake.
		 *
		 * Since this is single-clk case, just update the clk pointer
		 * too.
		 */
		opp_table->clk = opp_table->clks[0];
	}
2332

2333
	return 0;
2334 2335 2336 2337

free_clks:
	_put_clks(opp_table, i);
	return ret;
2338 2339 2340
}

/**
2341 2342
 * _opp_put_clknames() - Releases resources blocked for clks.
 * @opp_table: OPP table returned from _opp_set_clknames().
2343
 */
2344
static void _opp_put_clknames(struct opp_table *opp_table)
2345
{
2346 2347 2348 2349 2350 2351 2352
	if (!opp_table->clks)
		return;

	opp_table->config_clks = NULL;
	opp_table->clk = ERR_PTR(-ENODEV);

	_put_clks(opp_table, opp_table->clk_count);
2353 2354
}

2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386
/**
 * _opp_set_config_regulators_helper() - Register custom set regulator helper.
 * @opp_table: OPP table to attach the helper to.
 * @dev: Device for which the helper is getting registered.
 * @config_regulators: Custom set regulator helper.
 *
 * This is useful to support platforms with multiple regulators per device.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
		struct device *dev, config_regulators_t config_regulators)
{
	/* Keep the helper installed by another CPU sharing the table. */
	if (!opp_table->config_regulators)
		opp_table->config_regulators = config_regulators;

	return 0;
}

/**
 * _opp_put_config_regulators_helper() - Releases resources blocked for
 *					 config_regulators helper.
 * @opp_table: OPP table returned from _opp_set_config_regulators_helper().
 *
 * Release resources blocked for platform specific config_regulators helper.
 */
static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
	/* Writing NULL over NULL is harmless, so no guard is needed. */
	opp_table->config_regulators = NULL;
}

2387
static void _detach_genpd(struct opp_table *opp_table)
2388 2389 2390
{
	int index;

2391 2392 2393
	if (!opp_table->genpd_virt_devs)
		return;

2394 2395 2396 2397 2398 2399 2400
	for (index = 0; index < opp_table->required_opp_count; index++) {
		if (!opp_table->genpd_virt_devs[index])
			continue;

		dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
		opp_table->genpd_virt_devs[index] = NULL;
	}
2401 2402 2403

	kfree(opp_table->genpd_virt_devs);
	opp_table->genpd_virt_devs = NULL;
2404 2405
}

2406
/**
2407
 * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
2408 2409
 * @dev: Consumer device for which the genpd is getting attached.
 * @names: Null terminated array of pointers containing names of genpd to attach.
2410
 * @virt_devs: Pointer to return the array of virtual devices.
2411 2412 2413 2414 2415
 *
 * Multiple generic power domains for a device are supported with the help of
 * virtual genpd devices, which are created for each consumer device - genpd
 * pair. These are the device structures which are attached to the power domain
 * and are required by the OPP core to set the performance state of the genpd.
2416 2417
 * The same API also works for the case where single genpd is available and so
 * we don't need to support that separately.
2418 2419
 *
 * This helper will normally be called by the consumer driver of the device
2420
 * "dev", as only that has details of the genpd names.
2421
 *
2422 2423
 * This helper needs to be called once with a list of all genpd to attach.
 * Otherwise the original device structure will be used instead by the OPP core.
2424 2425 2426
 *
 * The order of entries in the names array must match the order in which
 * "required-opps" are added in DT.
2427
 */
2428 2429
static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
			const char * const *names, struct device ***virt_devs)
2430
{
2431
	struct device *virt_dev;
2432
	int index = 0, ret = -EINVAL;
2433
	const char * const *name = names;
2434

2435
	if (opp_table->genpd_virt_devs)
2436
		return 0;
2437

2438 2439 2440 2441 2442
	/*
	 * If the genpd's OPP table isn't already initialized, parsing of the
	 * required-opps fail for dev. We should retry this after genpd's OPP
	 * table is added.
	 */
2443 2444
	if (!opp_table->required_opp_count)
		return -EPROBE_DEFER;
2445

2446 2447
	mutex_lock(&opp_table->genpd_virt_dev_lock);

2448 2449 2450 2451 2452
	opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
					     sizeof(*opp_table->genpd_virt_devs),
					     GFP_KERNEL);
	if (!opp_table->genpd_virt_devs)
		goto unlock;
2453

2454 2455 2456 2457 2458 2459
	while (*name) {
		if (index >= opp_table->required_opp_count) {
			dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
				*name, opp_table->required_opp_count, index);
			goto err;
		}
2460

2461
		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
2462
		if (IS_ERR_OR_NULL(virt_dev)) {
2463
			ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
2464 2465 2466 2467 2468
			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
			goto err;
		}

		opp_table->genpd_virt_devs[index] = virt_dev;
2469
		index++;
2470
		name++;
2471 2472
	}

2473 2474
	if (virt_devs)
		*virt_devs = opp_table->genpd_virt_devs;
2475 2476
	mutex_unlock(&opp_table->genpd_virt_dev_lock);

2477
	return 0;
2478 2479

err:
2480
	_detach_genpd(opp_table);
2481
unlock:
2482
	mutex_unlock(&opp_table->genpd_virt_dev_lock);
2483
	return ret;
2484

2485 2486 2487
}

/**
2488 2489
 * _opp_detach_genpd() - Detach genpd(s) from the device.
 * @opp_table: OPP table returned by _opp_attach_genpd().
2490
 *
2491 2492
 * This detaches the genpd(s), resets the virtual device pointers, and puts the
 * OPP table.
2493
 */
2494
static void _opp_detach_genpd(struct opp_table *opp_table)
2495 2496 2497 2498 2499 2500
{
	/*
	 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
	 * used in parallel.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);
2501
	_detach_genpd(opp_table);
2502 2503
	mutex_unlock(&opp_table->genpd_virt_dev_lock);
}
2504

2505 2506 2507
static void _opp_clear_config(struct opp_config_data *data)
{
	if (data->flags & OPP_CONFIG_GENPD)
2508
		_opp_detach_genpd(data->opp_table);
2509
	if (data->flags & OPP_CONFIG_REGULATOR)
2510
		_opp_put_regulators(data->opp_table);
2511
	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
2512
		_opp_put_supported_hw(data->opp_table);
2513
	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
2514
		_opp_put_config_regulators_helper(data->opp_table);
2515
	if (data->flags & OPP_CONFIG_PROP_NAME)
2516
		_opp_put_prop_name(data->opp_table);
2517
	if (data->flags & OPP_CONFIG_CLK)
2518
		_opp_put_clknames(data->opp_table);
2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542

	dev_pm_opp_put_opp_table(data->opp_table);
	kfree(data);
}

/**
 * dev_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 *
 * This must be called before any OPPs are initialized for the device. This may
 * be called multiple times for the same OPP table, for example once for each
 * CPU that share the same table. This must be balanced by the same number of
 * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
 *
 * This returns a token to the caller, which must be passed to
 * dev_pm_opp_clear_config() to free the resources later. The value of the
 * returned token will be >= 1 for success and negative for errors. The minimum
 * value of 1 is chosen here to make it easy for callers to manage the resource.
 */
int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
2543
	struct opp_table *opp_table;
2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568
	struct opp_config_data *data;
	unsigned int id;
	int ret;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	opp_table = _add_opp_table(dev, false);
	if (IS_ERR(opp_table)) {
		kfree(data);
		return PTR_ERR(opp_table);
	}

	data->opp_table = opp_table;
	data->flags = 0;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Configure clocks */
	if (config->clk_names) {
2569 2570
		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
					config->config_clks);
2571
		if (ret)
2572 2573 2574
			goto err;

		data->flags |= OPP_CONFIG_CLK;
2575 2576 2577 2578
	} else if (config->config_clks) {
		/* Don't allow config callback without clocks */
		ret = -EINVAL;
		goto err;
2579 2580 2581 2582
	}

	/* Configure property names */
	if (config->prop_name) {
2583 2584
		ret = _opp_set_prop_name(opp_table, config->prop_name);
		if (ret)
2585 2586 2587 2588 2589
			goto err;

		data->flags |= OPP_CONFIG_PROP_NAME;
	}

2590 2591 2592 2593 2594 2595 2596 2597 2598 2599
	/* Configure config_regulators helper */
	if (config->config_regulators) {
		ret = _opp_set_config_regulators_helper(opp_table, dev,
						config->config_regulators);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR_HELPER;
	}

2600 2601
	/* Configure supported hardware */
	if (config->supported_hw) {
2602 2603 2604
		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
					    config->supported_hw_count);
		if (ret)
2605 2606 2607 2608 2609 2610 2611
			goto err;

		data->flags |= OPP_CONFIG_SUPPORTED_HW;
	}

	/* Configure supplies */
	if (config->regulator_names) {
2612 2613 2614
		ret = _opp_set_regulators(opp_table, dev,
					  config->regulator_names);
		if (ret)
2615 2616 2617 2618 2619 2620 2621
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR;
	}

	/* Attach genpds */
	if (config->genpd_names) {
2622 2623 2624
		ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
					config->virt_devs);
		if (ret)
2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701
			goto err;

		data->flags |= OPP_CONFIG_GENPD;
	}

	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret)
		goto err;

	return id;

err:
	_opp_clear_config(data);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);

/**
 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
 * @token: Token returned from dev_pm_opp_set_config().
 *
 * This allows all device OPP configurations to be cleared at once. This must be
 * called once for each call made to dev_pm_opp_set_config(), in order to free
 * the OPPs properly.
 *
 * Currently the first call itself ends up freeing all the OPP configurations,
 * while the later ones only drop the OPP table reference. This works well for
 * now as we would never want to use an half initialized OPP table and want to
 * remove the configurations together.
 */
void dev_pm_opp_clear_config(int token)
{
	struct opp_config_data *data;

	/*
	 * Tokens are always >= 1; accept anything else silently so callers
	 * can invoke this unconditionally.
	 */
	if (unlikely(token <= 0))
		return;

	data = xa_erase(&opp_configs, token);
	if (WARN_ON(!data))
		return;

	_opp_clear_config(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);

/* devres action: recover the integer token smuggled through the pointer. */
static void devm_pm_opp_config_release(void *token)
{
	dev_pm_opp_clear_config((unsigned long)token);
}

/**
 * devm_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 * This is a resource-managed variant of dev_pm_opp_set_config().
 *
 * Return: 0 on success and errorno otherwise.
 */
int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	int token = dev_pm_opp_set_config(dev, config);

	if (token < 0)
		return token;

	/* Stash the integer token in the action's cookie pointer. */
	return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
					(void *) ((unsigned long) token));
}
EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);

2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756
/**
 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
 * @src_table: OPP table which has @dst_table as one of its required OPP table.
 * @dst_table: Required OPP table of the @src_table.
 * @src_opp: OPP from the @src_table.
 *
 * This function returns the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the @src_opp (present in @src_table).
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 *
 * Return: pointer to 'struct dev_pm_opp' on success and errorno otherwise.
 */
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
						 struct opp_table *dst_table,
						 struct dev_pm_opp *src_opp)
{
	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
	int i;

	if (!src_table || !dst_table || !src_opp ||
	    !src_table->required_opp_tables)
		return ERR_PTR(-EINVAL);

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return ERR_PTR(-EBUSY);

	/* Locate @dst_table among the required tables of @src_table. */
	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i] == dst_table) {
			mutex_lock(&src_table->lock);

			/*
			 * Confirm @src_opp still belongs to @src_table before
			 * dereferencing its required_opps entry.
			 */
			list_for_each_entry(opp, &src_table->opp_list, node) {
				if (opp == src_opp) {
					dest_opp = opp->required_opps[i];
					/* Reference dropped by the caller via dev_pm_opp_put(). */
					dev_pm_opp_get(dest_opp);
					break;
				}
			}

			mutex_unlock(&src_table->lock);
			break;
		}
	}

	if (IS_ERR(dest_opp)) {
		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
		       src_table, dst_table);
	}

	return dest_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);

2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784
/**
 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
 * @src_table: OPP table which has dst_table as one of its required OPP table.
 * @dst_table: Required OPP table of the src_table.
 * @pstate: Current performance state of the src_table.
 *
 * This Returns pstate of the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the OPP (present in @src_table) which has
 * performance state set to @pstate.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
				       struct opp_table *dst_table,
				       unsigned int pstate)
{
	struct dev_pm_opp *opp;
	int dest_pstate = -EINVAL;
	int i;

	/*
	 * Normally the src_table will have the "required_opps" property set to
	 * point to one of the OPPs in the dst_table, but in some cases the
	 * genpd and its master have one to one mapping of performance states
	 * and so none of them have the "required-opps" property set. Return the
	 * pstate of the src_table as it is in such cases.
	 */
2785
	if (!src_table || !src_table->required_opp_count)
2786 2787
		return pstate;

2788 2789 2790 2791 2792 2793
	/* Both OPP tables must belong to genpds */
	if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return -EINVAL;
	}

2794 2795 2796 2797
	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return -EBUSY;

2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811
	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i]->np == dst_table->np)
			break;
	}

	if (unlikely(i == src_table->required_opp_count)) {
		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
		       __func__, src_table, dst_table);
		return -EINVAL;
	}

	mutex_lock(&src_table->lock);

	list_for_each_entry(opp, &src_table->opp_list, node) {
2812 2813
		if (opp->level == pstate) {
			dest_pstate = opp->required_opps[i]->level;
2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826
			goto unlock;
		}
	}

	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
	       dst_table);

unlock:
	mutex_unlock(&src_table->lock);

	return dest_pstate;
}

2827
/**
2828 2829 2830
 * dev_pm_opp_add_dynamic()  - Add an OPP table from a table definitions
 * @dev:	The device for which we do this operation
 * @data:	The OPP data for the OPP to add
2831
 *
2832
 * This function adds an opp definition to the opp table and returns status.
2833 2834 2835 2836
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
2837
 * 0		On success OR
2838
 *		Duplicate OPPs (both freq and volt are same) and opp->available
2839
 * -EEXIST	Freq are same and volt are different OR
2840
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
2841
 * -ENOMEM	Memory allocation failure
2842
 */
2843
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
2844
{
2845 2846 2847
	struct opp_table *opp_table;
	int ret;

2848
	opp_table = _add_opp_table(dev, true);
2849 2850
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
2851

2852 2853 2854
	/* Fix regulator count for dynamic OPPs */
	opp_table->regulator_count = 1;

2855
	ret = _opp_add_v1(opp_table, dev, data, true);
2856 2857
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);
2858 2859

	return ret;
2860
}
2861
EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
2862 2863

/**
2864
 * _opp_set_availability() - helper to set the availability of an opp
2865 2866 2867 2868
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
2869 2870
 * Set the availability of an OPP, opp_{enable,disable} share a common logic
 * which is isolated here.
2871
 *
2872
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
2873
 * copy operation, returns 0 if no modification was done OR modification was
2874 2875
 * successful.
 */
2876 2877
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
2878
{
2879
	struct opp_table *opp_table;
2880
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
2881 2882
	int r = 0;

2883 2884 2885 2886
	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
2887
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
2888
		return r;
2889 2890
	}

2891 2892 2893 2894 2895
	if (!assert_single_clk(opp_table)) {
		r = -EINVAL;
		goto put_table;
	}

2896 2897
	mutex_lock(&opp_table->lock);

2898
	/* Do we have the frequency? */
2899
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2900
		if (tmp_opp->rates[0] == freq) {
2901 2902 2903 2904
			opp = tmp_opp;
			break;
		}
	}
2905

2906 2907 2908 2909 2910 2911 2912 2913 2914
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

2915
	opp->available = availability_req;
2916

2917 2918 2919
	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

2920 2921
	/* Notify the change of the OPP availability */
	if (availability_req)
2922
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
2923
					     opp);
2924
	else
2925
		blocking_notifier_call_chain(&opp_table->head,
2926
					     OPP_EVENT_DISABLE, opp);
2927

2928 2929 2930
	dev_pm_opp_put(opp);
	goto put_table;

2931
unlock:
2932
	mutex_unlock(&opp_table->lock);
2933
put_table:
2934
	dev_pm_opp_put_opp_table(opp_table);
2935 2936 2937
	return r;
}

2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966
/**
 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to adjust voltage of
 * @u_volt:		new OPP target voltage
 * @u_volt_min:		new OPP min voltage
 * @u_volt_max:		new OPP max voltage
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modifcation was done OR modification was
 * successful.
 */
int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
			      unsigned long u_volt, unsigned long u_volt_min,
			      unsigned long u_volt_max)

{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

2967 2968 2969 2970 2971
	if (!assert_single_clk(opp_table)) {
		r = -EINVAL;
		goto put_table;
	}

2972 2973 2974 2975
	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
2976
		if (tmp_opp->rates[0] == freq) {
2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto adjust_unlock;
	}

	/* Is update really needed? */
	if (opp->supplies->u_volt == u_volt)
		goto adjust_unlock;

	opp->supplies->u_volt = u_volt;
	opp->supplies->u_volt_min = u_volt_min;
	opp->supplies->u_volt_max = u_volt_max;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the voltage change of the OPP */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);

	dev_pm_opp_put(opp);
3003
	goto put_table;
3004 3005 3006

adjust_unlock:
	mutex_unlock(&opp_table->lock);
3007
put_table:
3008 3009 3010
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
3011
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
3012

3013
/**
3014
 * dev_pm_opp_enable() - Enable a specific OPP
3015 3016 3017 3018 3019
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
3020
 * after being temporarily made unavailable with dev_pm_opp_disable.
3021
 *
3022
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
3023
 * copy operation, returns 0 if no modification was done OR modification was
3024
 * successful.
3025
 */
3026
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
3027
{
3028
	return _opp_set_availability(dev, freq, true);
3029
}
3030
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
3031 3032

/**
3033
 * dev_pm_opp_disable() - Disable a specific OPP
3034 3035 3036 3037 3038 3039
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
3040
 * right to make it available again (with a call to dev_pm_opp_enable).
3041
 *
3042
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
3043
 * copy operation, returns 0 if no modification was done OR modification was
3044
 * successful.
3045
 */
3046
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
3047
{
3048
	return _opp_set_availability(dev, freq, false);
3049
}
3050
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
3051

3052
/**
3053 3054 3055
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev:	Device for which notifier needs to be registered
 * @nb:		Notifier block to be registered
3056
 *
3057 3058 3059 3060 3061 3062 3063 3064
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
3065 3066 3067
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

3068
	ret = blocking_notifier_chain_register(&opp_table->head, nb);
3069

3070
	dev_pm_opp_put_opp_table(opp_table);
3071 3072 3073 3074 3075 3076 3077 3078 3079

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev:	Device for which notifier needs to be unregistered
 * @nb:		Notifier block to be unregistered
3080
 *
3081
 * Return: 0 on success or a negative error value.
3082
 */
3083 3084
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
3085
{
3086 3087
	struct opp_table *opp_table;
	int ret;
3088

3089
	opp_table = _find_opp_table(dev);
3090 3091
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
3092

3093
	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
3094

3095
	dev_pm_opp_put_opp_table(opp_table);
3096 3097

	return ret;
3098
}
3099
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
3100

3101 3102 3103 3104 3105 3106 3107 3108
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
3109 3110 3111
{
	struct opp_table *opp_table;

3112 3113 3114 3115
	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);
3116 3117

		if (error != -ENODEV)
3118
			WARN(1, "%s: opp_table: %d\n",
3119 3120 3121
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
3122
		return;
3123 3124
	}

3125 3126 3127 3128 3129 3130
	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 **/
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);
3131 3132 3133

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
3134
}
3135
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176

/**
 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
 * @dev:	device for which we do this operation
 *
 * Sync voltage state of the OPP table regulators.
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_sync_regulators(struct device *dev)
{
	struct opp_table *opp_table;
	int idx, ret = 0;

	/* Devices without an OPP table are simply ignored */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/*
	 * Nothing to do when the device needs no regulator, or when no
	 * voltage has been programmed yet.
	 */
	if (unlikely(!opp_table->regulators) || !opp_table->enabled)
		goto put_table;

	/* Re-apply the last configured voltage on every regulator */
	for (idx = 0; idx < opp_table->regulator_count; idx++) {
		ret = regulator_sync_voltage(opp_table->regulators[idx]);
		if (ret)
			break;
	}
put_table:
	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);