/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab
 *	The entire API was rewritten and ported to use struct device
 *
 */

#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "edac_core.h"
#include "edac_module.h"

/* MC EDAC Controls, settable by module parameter, and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static int edac_mc_poll_msec = 1000;
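
/*
 * These controls are local to this file; the rest of the EDAC MC core reads
 * them through the accessor functions below, and they can also be changed at
 * runtime via the module parameters declared further down.
 */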

/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* this is temporary */
int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
	unsigned long l;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtoul(val, 0, &l);
	if (ret)
		return ret;

	if (l < 1000)
		return -EINVAL;

	/* kp->arg points at edac_mc_poll_msec, which is an int */
	*((int *)kp->arg) = l;

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(l);

	return 0;
}

/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
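
/*
 * Illustrative usage (not part of this file): with the parameters declared
 * above, the knobs show up as writable files under
 * /sys/module/<module>/parameters/ once the module is loaded; for the usual
 * edac_core build that would be, e.g.:
 *
 *	echo 0    > /sys/module/edac_core/parameters/edac_mc_log_ce
 *	echo 1    > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
 *	echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
 *
 * The exact module name depends on how this file is linked, so treat the
 * paths as an example rather than a guarantee.
 */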

static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
static const char * const mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS",
	[MEM_DDR2] = "Unbuffered-DDR2",
	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
	[MEM_RDDR2] = "Registered-DDR2",
	[MEM_XDR] = "XDR",
	[MEM_DDR3] = "Unbuffered-DDR3",
	[MEM_RDDR3] = "Registered-DDR3",
	[MEM_DDR4] = "Unbuffered-DDR4",
	[MEM_RDDR4] = "Registered-DDR4"
};

static const char * const dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

static const char * const edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)

struct dev_ch_attribute {
	struct device_attribute attr;
	int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	static struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
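
/*
 * DEVICE_CHANNEL() pairs an ordinary device_attribute with a channel number,
 * so the shared show/store routines below can recover which chX_* file was
 * accessed through to_channel(mattr).
 */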

/* Set of default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ue_count);
}

static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ce_count);
}

static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	int i;
	u32 nr_pages = 0;

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i]->dimm->nr_pages;
	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}

static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
}

static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}

static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}

/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	/* if field has not been initialized, there is nothing to send */
	if (!rank->dimm->label[0])
		return 0;

	return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
			rank->dimm->label);
}

static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
		return -EINVAL;

	strncpy(rank->dimm->label, data, copy_count);
	rank->dimm->label[copy_count] = '\0';

	return count;
}

/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	return sprintf(data, "%u\n", rank->ce_count);
}

/* csrow<id>/attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

static struct attribute_group csrow_attr_grp = {
	.attrs	= csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};

static void csrow_attr_release(struct device *dev)
{
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(csrow);
}

static struct device_type csrow_attr_type = {
	.groups		= csrow_attr_groups,
	.release	= csrow_attr_release,
};

/*
 * possible dynamic channel DIMM Label attribute files
 *
 */

DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);

/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr.attr,
	&dev_attr_legacy_ch1_dimm_label.attr.attr,
	&dev_attr_legacy_ch2_dimm_label.attr.attr,
	&dev_attr_legacy_ch3_dimm_label.attr.attr,
	&dev_attr_legacy_ch4_dimm_label.attr.attr,
	&dev_attr_legacy_ch5_dimm_label.attr.attr,
	NULL
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 5);

/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr.attr,
	&dev_attr_legacy_ch1_ce_count.attr.attr,
	&dev_attr_legacy_ch2_ce_count.attr.attr,
	&dev_attr_legacy_ch3_ce_count.attr.attr,
	&dev_attr_legacy_ch4_ce_count.attr.attr,
	&dev_attr_legacy_ch5_ce_count.attr.attr,
	NULL
};
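
/*
 * For the groups below, the attribute index handed to .is_visible matches the
 * attribute's position in the attrs[] table above, which in turn is the
 * channel number, so chX_* files are only created for channels that exist
 * and have a populated DIMM.
 */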

static umode_t csrow_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	if (idx >= csrow->nr_channels)
		return 0;
	/* Only expose populated DIMMs */
	if (!csrow->channels[idx]->dimm->nr_pages)
		return 0;
	return attr->mode;
}


static const struct attribute_group csrow_dev_dimm_group = {
	.attrs = dynamic_csrow_dimm_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group csrow_dev_ce_count_group = {
	.attrs = dynamic_csrow_ce_count_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group *csrow_dev_groups[] = {
	&csrow_dev_dimm_group,
	&csrow_dev_ce_count_group,
	NULL
};

static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int chan, nr_pages = 0;

	for (chan = 0; chan < csrow->nr_channels; chan++)
		nr_pages += csrow->channels[chan]->dimm->nr_pages;

	return nr_pages;
}

/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	csrow->dev.type = &csrow_attr_type;
	csrow->dev.bus = mci->bus;
	csrow->dev.groups = csrow_dev_groups;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	edac_dbg(0, "creating (virtual) csrow node %s\n",
		 dev_name(&csrow->dev));

	return device_add(&csrow->dev);
}

/* Create the CSROW objects under the specified edac_mc device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0) {
			edac_dbg(1,
				 "failure: create csrow objects for csrow %d\n",
				 i);
			goto error;
		}
	}
	return 0;

error:
	for (--i; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		put_device(&mci->csrows[i]->dev);
	}

	return err;
}

static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i;
	struct csrow_info *csrow;

	for (i = mci->nr_csrows - 1; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		device_unregister(&mci->csrows[i]->dev);
	}
}
#endif

/*
 * Per-dimm (or per-rank) devices
 */

#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
}

static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
		return -EINVAL;

	strncpy(dimm->label, data, copy_count);
	dimm->label[copy_count] = '\0';

	return count;
}

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	NULL,
};

static struct attribute_group dimm_attr_grp = {
	.attrs	= dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

static void dimm_attr_release(struct device *dev)
{
	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);

	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
	kfree(dimm);
}

static struct device_type dimm_attr_type = {
	.groups		= dimm_attr_groups,
	.release	= dimm_attr_release,
};

/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm,
				   int index)
{
	int err;
	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	dimm->dev.bus = mci->bus;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	if (mci->csbased)
		dev_set_name(&dimm->dev, "rank%d", index);
	else
		dev_set_name(&dimm->dev, "dimm%d", index);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err = device_add(&dimm->dev);

	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));

	return err;
}

/*
 * Memory controller device
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int cnt, row, chan, i;
	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	cnt = 1;
	for (i = 0; i < mci->n_layers; i++) {
		cnt *= mci->layers[i].size;
		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
	}

	mci->start_time = jiffies;
	return count;
}
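
/*
 * Illustrative only: writing anything to the reset_counters file clears the
 * counts, e.g. (assuming the usual sysfs location of the mc<N> devices):
 *
 *	echo 1 > /sys/devices/system/edac/mc/mc0/reset_counters
 */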

/* Memory scrubbing interface:
 *
 * A MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 *
 * Negative value still means that an error has occurred while setting
 * the scrub rate.
 */
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	unsigned long bandwidth = 0;
	int new_bw = 0;

	if (kstrtoul(data, 10, &bandwidth) < 0)
		return -EINVAL;

	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
	}

	return count;
}

/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int bandwidth = 0;

	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
		return bandwidth;
	}

	return sprintf(data, "%d\n", bandwidth);
}
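
/*
 * A sketch (not a driver in this tree) of how a memory-controller driver
 * hooks into the two scrub-rate callbacks used above; the mydrv_* names and
 * current_hw_rate are made up for illustration:
 *
 *	static int mydrv_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 *	{
 *		// program the hardware, then return the bandwidth actually
 *		// set, 0 if scrubbing ended up disabled, or a negative errno
 *		return bw;
 *	}
 *
 *	static int mydrv_get_scrub_rate(struct mem_ctl_info *mci)
 *	{
 *		return current_hw_rate;	// same semantics as above
 *	}
 *
 *	mci->set_sdram_scrub_rate = mydrv_set_scrub_rate;
 *	mci->get_sdram_scrub_rate = mydrv_get_scrub_rate;
 */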

/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_mc);
}

static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_mc);
}

static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_noinfo_count);
}

static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_noinfo_count);
}

static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}

static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%s\n", mci->ctl_name);
}

static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int total_pages = 0, csrow_idx, j;

	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
		struct csrow_info *csrow = mci->csrows[csrow_idx];

		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			total_pages += dimm->nr_pages;
		}
	}

	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}

static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int i;
	char *p = data;

	for (i = 0; i < mci->n_layers; i++) {
		p += sprintf(p, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     mci->layers[i].size - 1);
	}

	return p - data;
}

/* default Control file */
static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
	    mci_sdram_scrub_rate_store); /* umode set later in is_visible */

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	&dev_attr_sdram_scrub_rate.attr,
	NULL
};
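
/*
 * sdram_scrub_rate is only made readable and/or writable when the driver
 * supplies the corresponding get/set callback; with neither callback the
 * file is not created at all.
 */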

static umode_t mci_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = to_mci(dev);
	umode_t mode = 0;

	if (attr != &dev_attr_sdram_scrub_rate.attr)
		return attr->mode;
	if (mci->get_sdram_scrub_rate)
		mode |= S_IRUGO;
	if (mci->set_sdram_scrub_rate)
		mode |= S_IWUSR;
	return mode;
}

static struct attribute_group mci_attr_grp = {
	.attrs	= mci_attrs,
	.is_visible = mci_attr_is_visible,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

static void mci_attr_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

	edac_dbg(1, "Releasing mci device %s\n", dev_name(dev));
	kfree(mci);
}

static struct device_type mci_attr_type = {
	.groups		= mci_attr_groups,
	.release	= mci_attr_release,
};

/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
				 const struct attribute_group **groups)
{
	char *name;
	int i, err;

	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!name)
		return -ENOMEM;

	mci->bus->name = name;

	edac_dbg(0, "creating bus %s\n", mci->bus->name);

	err = bus_register(mci->bus);
	if (err < 0) {
		kfree(name);
		return err;
	}

	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.bus = mci->bus;
	mci->dev.groups = groups;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		goto fail_unregister_bus;
	}

	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (!dimm->nr_pages)
			continue;

#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail_unregister_dimm;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail_unregister_dimm;
#endif

	edac_create_debugfs_nodes(mci);
	return 0;

fail_unregister_dimm:
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (!dimm->nr_pages)
			continue;

		device_unregister(&dimm->dev);
	}
	device_unregister(&mci->dev);
fail_unregister_bus:
	bus_unregister(mci->bus);
	kfree(name);

	return err;
}
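
/*
 * This is normally reached from the edac_mc core when a driver registers its
 * mem_ctl_info (via the edac_mc_add_mc*() helpers); drivers are not expected
 * to call it directly.
 */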

/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	edac_debugfs_remove_recursive(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}
}

void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
	const char *name = mci->bus->name;

	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
	device_unregister(&mci->dev);
	bus_unregister(mci->bus);
	kfree(name);
}

static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/system/edac/mc
	 * sysfs node. So, there are no attributes on it.
	 */
	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
	kfree(dev);
}

static struct device_type mc_attr_type = {
	.release	= mc_attr_release,
};
/*
 * Init/exit code for the module. Basically, creates/removes the "mc"
 * device under the edac sysfs subsystem (/sys/devices/system/edac/mc).
 */
int __init edac_mc_sysfs_init(void)
{
	struct bus_type *edac_subsys;
	int err;

	/* get the /sys/devices/system/edac subsys reference */
	edac_subsys = edac_get_sysfs_subsys();
	if (edac_subsys == NULL) {
		edac_dbg(1, "no edac_subsys\n");
		err = -EINVAL;
		goto out;
	}

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev) {
		err = -ENOMEM;
		goto out_put_sysfs;
	}

	mci_pdev->bus = edac_subsys;
	mci_pdev->type = &mc_attr_type;
	device_initialize(mci_pdev);
	dev_set_name(mci_pdev, "mc");

	err = device_add(mci_pdev);
	if (err < 0)
		goto out_dev_free;

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;

 out_dev_free:
	kfree(mci_pdev);
 out_put_sysfs:
	edac_put_sysfs_subsys();
 out:
	return err;
}

void edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
	edac_put_sysfs_subsys();
}