/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Serializes access to dca_domains and each domain's provider list. */
static DEFINE_RAW_SPINLOCK(dca_lock);

/* All registered dca_domains, one per PCI root complex with providers. */
static LIST_HEAD(dca_domains);

/* Clients subscribe here to learn about provider add/remove events. */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Set when an ioat v3.0 provider appears alongside pre-v3 domains;
 * once set, all further provider registration is refused. */
static int dca_providers_blocked;
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
48
{
49 50
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;
51

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
	while (bus->parent)
		bus = bus->parent;

	return bus;
}

/*
 * Allocate and initialize a dca_domain for root complex @rc.
 * GFP_NOWAIT keeps the allocation non-blocking; returns NULL on failure.
 */
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain = kzalloc(sizeof(*domain), GFP_NOWAIT);

	if (domain) {
		INIT_LIST_HEAD(&domain->dca_providers);
		domain->pci_rc = rc;
	}

	return domain;
}

/* Unlink @domain from dca_domains and free it; callers hold dca_lock. */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

/*
 * Tear down every provider in the (single remaining) domain.  Providers
 * are moved onto a private list under dca_lock, then their sysfs entries
 * are removed after the lock is dropped.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	/* notify clients first so they stop using the providers */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs removal may sleep, so it happens outside the raw lock */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
148
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
149
			dca_providers_blocked = 1;
150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172
	}

	return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
173 174
	}

175 176 177 178 179
	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
180
}
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST if @dev is
 * already registered, -ENODEV when no domain or provider accepts it,
 * or the error from sysfs registration.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* offer @dev to each provider in the domain; first taker wins */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs registration can sleep, so it runs outside the raw lock */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* roll back only if the same provider still manages @dev */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
243
	struct dca_provider *dca;
244
	int slot;
245
	unsigned long flags;
246 247 248

	if (!dev)
		return -EFAULT;
249

250
	raw_spin_lock_irqsave(&dca_lock, flags);
251 252
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
253
		raw_spin_unlock_irqrestore(&dca_lock, flags);
254 255 256
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
257
	raw_spin_unlock_irqrestore(&dca_lock, flags);
258 259

	if (slot < 0)
260 261
		return slot;

262 263
	dca_sysfs_remove_req(dca, slot);

264 265 266 267 268
	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 *
 * NOTE(review): the return type is u8, so the -ENODEV error return is
 * truncated to an 8-bit value that callers cannot distinguish from a
 * valid tag.  Pre-existing API wart in both dca_get_tag()/dca3_get_tag();
 * left unchanged to preserve the exported interface.
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	/* ask the provider for the tag matching @cpu */
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	/* NULL device selects the first domain's provider (legacy api) */
	return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 *
 * Returns the new provider, or NULL on allocation failure.
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;

	/* provider struct and its private area live in one allocation */
	dca = kzalloc(sizeof(*dca) + priv_size, GFP_KERNEL);
	if (dca)
		dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 *
 * Returns 0 on success, -ENODEV when providers are blocked or the domain
 * cannot be allocated, or the error from sysfs registration.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs registration can sleep, so it runs unlocked */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		/* dca_get_domain() may have just set the blocked flag */
		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		/* drop the raw lock to allocate a new domain */
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	/* free the spare domain if we lost the insertion race */
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device that was providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	/* notify clients before the provider goes away */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* all domains may already be gone (see unregister_dca_providers) */
	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	/* NOTE(review): assumes a domain for @dev's root complex still
	 * exists whenever dca_domains is non-empty; dca_find_domain() can
	 * return NULL in principle -- confirm this invariant holds. */
	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs removal may sleep; done after dropping the raw lock */
	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block to add to the dca provider chain
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block previously added with dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

/* Module init: announce the service and create the sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);

	return dca_sysfs_init();
}

/* Module exit: tear down the sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

/* registered via arch_initcall so the service comes up early -- before
 * provider drivers load; confirm against initcall ordering if changed */
arch_initcall(dca_init);
module_exit(dca_exit);