trace_selftest.c 13.3 KB
Newer Older
Steven Rostedt's avatar
Steven Rostedt committed
1 2 3
/* Include in trace.c */

#include <linux/kthread.h>
Ingo Molnar's avatar
Ingo Molnar committed
4
#include <linux/delay.h>
Steven Rostedt's avatar
Steven Rostedt committed
5

Ingo Molnar's avatar
Ingo Molnar committed
6
static inline int trace_valid_entry(struct trace_entry *entry)
Steven Rostedt's avatar
Steven Rostedt committed
7 8 9 10
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
11
	case TRACE_WAKE:
12
	case TRACE_CONT:
13
	case TRACE_STACK:
14
	case TRACE_PRINT:
15
	case TRACE_SPECIAL:
16
	case TRACE_BRANCH:
Steven Rostedt's avatar
Steven Rostedt committed
17 18 19 20 21
		return 1;
	}
	return 0;
}

22
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
Steven Rostedt's avatar
Steven Rostedt committed
23
{
24 25
	struct ring_buffer_event *event;
	struct trace_entry *entry;
26
	unsigned int loops = 0;
Steven Rostedt's avatar
Steven Rostedt committed
27

28 29
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);
Steven Rostedt's avatar
Steven Rostedt committed
30

31 32 33 34 35 36 37 38 39
		/*
		 * The ring buffer is a size of trace_buf_size, if
		 * we loop more than the size, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
40
		if (!trace_valid_entry(entry)) {
Ingo Molnar's avatar
Ingo Molnar committed
41
			printk(KERN_CONT ".. invalid entry %d ",
42
				entry->type);
Steven Rostedt's avatar
Steven Rostedt committed
43 44 45 46 47 48
			goto failed;
		}
	}
	return 0;

 failed:
49 50
	/* disable tracing */
	tracing_disabled = 1;
Steven Rostedt's avatar
Steven Rostedt committed
51 52 53 54 55 56 57 58 59 60
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Consumes (and validates) every entry on every possible CPU of @tr.
 * If @count is non-NULL, the number of entries that were in the buffer
 * before consumption is stored through it.  Returns 0 if all entries
 * were valid, non-zero otherwise.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	/* snapshot the entry count before we start consuming */
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	/* release in reverse order of acquisition */
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

93 94 95 96 97
/*
 * Common warning for a tracer whose init callback failed during a
 * selftest: names the tracer and the error code it returned.
 */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
98
#ifdef CONFIG_FUNCTION_TRACER
99 100 101 102 103 104 105 106 107 108 109 110 111

#ifdef CONFIG_DYNAMIC_FTRACE

/* Two-level stringify: expand macro arguments first, then quote them. */
#define __STR(x) #x
#define STR(x) __STR(x)

/*
 * Test dynamic code modification and ftrace filters.
 *
 * Restricts the ftrace filter to the single test function @func, checks
 * that nothing else gets traced while it is quiescent, then calls @func
 * once and checks that exactly one entry appears.  Restores the saved
 * enable flags and clears the filter before returning; returns 0 on
 * success, non-zero on failure.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/*
	 * NOTE(review): the early "goto out" paths above bail after a
	 * successful trace->init() without calling trace->reset(tr) or
	 * tracing_stop() — the tracer may be left active on failure.
	 * Confirm whether that is intentional before relying on it.
	 */
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedt's avatar
Steven Rostedt committed
192 193 194 195 196 197 198 199
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 *
 * On success chains into the dynamic-ftrace selftest; on any failure
 * the whole of ftrace is killed via ftrace_kill().  Returns 0 on
 * success, non-zero on failure.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* an idle system should still have produced some function entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
248
#endif /* CONFIG_FUNCTION_TRACER */
Steven Rostedt's avatar
Steven Rostedt committed
249 250 251 252 253 254 255 256 257 258

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Selftest for the irqsoff latency tracer: disable interrupts for
 * ~100us and verify that both the live buffer and the max-latency
 * snapshot buffer captured sane, non-empty traces.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	/* count is only read when the second buffer check ran (ret == 0) */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
/*
 * Selftest for the preemptoff latency tracer: disable preemption for
 * ~100us and verify both trace buffers captured sane, non-empty traces.
 * Forced to pass when preemption is already disabled (see comment below).
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	/* count is only read when the second buffer check ran (ret == 0) */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Selftest for the combined preemptirqsoff tracer.  Runs two passes of
 * a ~100us critical section — first releasing preemption before irqs,
 * then with interrupts disabled first — and checks both trace buffers
 * after each pass.  Forced to pass when preemption is already disabled.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		/*
		 * NOTE(review): this goto runs trace->reset(tr) at "out:"
		 * even though init failed — verify reset is safe to call
		 * on an uninitialized tracer.
		 */
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

Steven Noonan's avatar
Steven Noonan committed
440 441 442 443 444 445 446 447 448
#ifdef CONFIG_NOP_TRACER
/* The nop tracer does nothing, so there is nothing to verify. */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

Steven Rostedt's avatar
Steven Rostedt committed
449 450 451 452
#ifdef CONFIG_SCHED_TRACER
/*
 * Kthread body for the wakeup-latency selftest.  @data is a completion
 * the test waits on: the thread raises itself to an RT priority, signals
 * the completion, then sleeps until the test wakes it.  After that it
 * idles in short sleeps until kthread_stop() is called.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
Ingo Molnar's avatar
Ingo Molnar committed
490
	if (IS_ERR(p)) {
Steven Rostedt's avatar
Steven Rostedt committed
491 492 493 494 495 496 497 498
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
499 500 501 502 503 504
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

Steven Rostedt's avatar
Steven Rostedt committed
505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

524 525 526
	/* give a little time to let the thread wake up */
	msleep(100);

Steven Rostedt's avatar
Steven Rostedt committed
527
	/* stop the tracing. */
528
	tracing_stop();
Steven Rostedt's avatar
Steven Rostedt committed
529 530 531 532 533 534 535
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
536
	tracing_start();
Steven Rostedt's avatar
Steven Rostedt committed
537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Selftest for the sched_switch tracer: trace for 1/10 second and
 * verify the buffer holds sane, non-empty context-switch entries.
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
583 584 585 586 587 588 589 590 591

#ifdef CONFIG_SYSPROF_TRACER
/*
 * Selftest for the sysprof tracer: trace for 1/10 second and verify
 * the buffer contents are sane (an empty buffer is not treated as a
 * failure here, unlike the other selftests).
 */
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		/*
		 * NOTE(review): every sibling selftest does "return ret"
		 * here; returning 0 makes an init failure count as a pass.
		 * Presumably deliberate (best-effort tracer) — confirm.
		 */
		return 0;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
610 611 612 613 614 615 616 617 618

#ifdef CONFIG_BRANCH_TRACER
/*
 * Selftest for the branch tracer: trace for 1/10 second and verify the
 * buffer contents are sane.  An empty buffer is not treated as failure.
 */
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */